WIP: local TradingAgents customizations through 2026-04-13
Bulk commit of accumulated local changes on the dtarkent2-sys fork. Spans agents, dataflows, llm_clients, graph orchestration, CLI, and docs. Primary work areas: - llm_clients/ — multi-LLM client layer (anthropic, google, openai, factory, base, validators) for swappable provider support - dataflows/alpaca_data.py — Alpaca integration alongside existing alpha_vantage and y_finance flows - agents/structured/ — portfolio, scoring, and tier1/2/3 layers - agents/analysts, researchers, risk_mgmt — local prompt and logic customizations - graph/ — orchestration tweaks (parallel_analysts, propagation, reflection, signal_processing, trading_graph) - alembic scaffolding inherited from prior commit - chainlit web UI design notes in docs/plans/ This is a single WIP snapshot to preserve work before any upstream merge. History can be cleaned up with interactive rebase later. Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
This commit is contained in:
parent
b863d0939c
commit
8c48c3cffd
|
|
@ -1,8 +1,8 @@
|
|||
.git
|
||||
eval_results
|
||||
nvda_output*.txt
|
||||
docs
|
||||
uv.lock
|
||||
__pycache__
|
||||
.env
|
||||
.env.example
|
||||
.git
|
||||
eval_results
|
||||
nvda_output*.txt
|
||||
docs
|
||||
uv.lock
|
||||
__pycache__
|
||||
.env
|
||||
.env.example
|
||||
|
|
|
|||
12
.env.example
12
.env.example
|
|
@ -1,6 +1,6 @@
|
|||
# LLM Providers (set the one you use)
|
||||
OPENAI_API_KEY=
|
||||
GOOGLE_API_KEY=
|
||||
ANTHROPIC_API_KEY=
|
||||
XAI_API_KEY=
|
||||
OPENROUTER_API_KEY=
|
||||
# LLM Providers (set the one you use)
|
||||
OPENAI_API_KEY=
|
||||
GOOGLE_API_KEY=
|
||||
ANTHROPIC_API_KEY=
|
||||
XAI_API_KEY=
|
||||
OPENROUTER_API_KEY=
|
||||
|
|
|
|||
|
|
@ -1,221 +1,221 @@
|
|||
# Byte-compiled / optimized / DLL files
|
||||
__pycache__/
|
||||
*.py[codz]
|
||||
*$py.class
|
||||
|
||||
# C extensions
|
||||
*.so
|
||||
|
||||
# Distribution / packaging
|
||||
.Python
|
||||
build/
|
||||
develop-eggs/
|
||||
dist/
|
||||
downloads/
|
||||
eggs/
|
||||
.eggs/
|
||||
lib/
|
||||
lib64/
|
||||
parts/
|
||||
sdist/
|
||||
var/
|
||||
wheels/
|
||||
share/python-wheels/
|
||||
*.egg-info/
|
||||
.installed.cfg
|
||||
*.egg
|
||||
MANIFEST
|
||||
|
||||
# PyInstaller
|
||||
# Usually these files are written by a python script from a template
|
||||
# before PyInstaller builds the exe, so as to inject date/other infos into it.
|
||||
*.manifest
|
||||
*.spec
|
||||
|
||||
# Installer logs
|
||||
pip-log.txt
|
||||
pip-delete-this-directory.txt
|
||||
|
||||
# Unit test / coverage reports
|
||||
htmlcov/
|
||||
.tox/
|
||||
.nox/
|
||||
.coverage
|
||||
.coverage.*
|
||||
.cache
|
||||
nosetests.xml
|
||||
coverage.xml
|
||||
*.cover
|
||||
*.py.cover
|
||||
.hypothesis/
|
||||
.pytest_cache/
|
||||
cover/
|
||||
|
||||
# Translations
|
||||
*.mo
|
||||
*.pot
|
||||
|
||||
# Django stuff:
|
||||
*.log
|
||||
local_settings.py
|
||||
db.sqlite3
|
||||
db.sqlite3-journal
|
||||
|
||||
# Flask stuff:
|
||||
instance/
|
||||
.webassets-cache
|
||||
|
||||
# Scrapy stuff:
|
||||
.scrapy
|
||||
|
||||
# Sphinx documentation
|
||||
docs/_build/
|
||||
|
||||
# PyBuilder
|
||||
.pybuilder/
|
||||
target/
|
||||
|
||||
# Jupyter Notebook
|
||||
.ipynb_checkpoints
|
||||
|
||||
# IPython
|
||||
profile_default/
|
||||
ipython_config.py
|
||||
|
||||
# pyenv
|
||||
# For a library or package, you might want to ignore these files since the code is
|
||||
# intended to run in multiple environments; otherwise, check them in:
|
||||
# .python-version
|
||||
|
||||
# pipenv
|
||||
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
|
||||
# However, in case of collaboration, if having platform-specific dependencies or dependencies
|
||||
# having no cross-platform support, pipenv may install dependencies that don't work, or not
|
||||
# install all needed dependencies.
|
||||
# Pipfile.lock
|
||||
|
||||
# UV
|
||||
# Similar to Pipfile.lock, it is generally recommended to include uv.lock in version control.
|
||||
# This is especially recommended for binary packages to ensure reproducibility, and is more
|
||||
# commonly ignored for libraries.
|
||||
# uv.lock
|
||||
|
||||
# poetry
|
||||
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
|
||||
# This is especially recommended for binary packages to ensure reproducibility, and is more
|
||||
# commonly ignored for libraries.
|
||||
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
|
||||
# poetry.lock
|
||||
# poetry.toml
|
||||
|
||||
# pdm
|
||||
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
|
||||
# pdm recommends including project-wide configuration in pdm.toml, but excluding .pdm-python.
|
||||
# https://pdm-project.org/en/latest/usage/project/#working-with-version-control
|
||||
# pdm.lock
|
||||
# pdm.toml
|
||||
.pdm-python
|
||||
.pdm-build/
|
||||
|
||||
# pixi
|
||||
# Similar to Pipfile.lock, it is generally recommended to include pixi.lock in version control.
|
||||
# pixi.lock
|
||||
# Pixi creates a virtual environment in the .pixi directory, just like venv module creates one
|
||||
# in the .venv directory. It is recommended not to include this directory in version control.
|
||||
.pixi
|
||||
|
||||
# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
|
||||
__pypackages__/
|
||||
|
||||
# Celery stuff
|
||||
celerybeat-schedule
|
||||
celerybeat.pid
|
||||
|
||||
# Redis
|
||||
*.rdb
|
||||
*.aof
|
||||
*.pid
|
||||
|
||||
# RabbitMQ
|
||||
mnesia/
|
||||
rabbitmq/
|
||||
rabbitmq-data/
|
||||
|
||||
# ActiveMQ
|
||||
activemq-data/
|
||||
|
||||
# SageMath parsed files
|
||||
*.sage.py
|
||||
|
||||
# Environments
|
||||
.env
|
||||
.envrc
|
||||
.venv
|
||||
env/
|
||||
venv/
|
||||
ENV/
|
||||
env.bak/
|
||||
venv.bak/
|
||||
|
||||
# Spyder project settings
|
||||
.spyderproject
|
||||
.spyproject
|
||||
|
||||
# Rope project settings
|
||||
.ropeproject
|
||||
|
||||
# mkdocs documentation
|
||||
/site
|
||||
|
||||
# mypy
|
||||
.mypy_cache/
|
||||
.dmypy.json
|
||||
dmypy.json
|
||||
|
||||
# Pyre type checker
|
||||
.pyre/
|
||||
|
||||
# pytype static type analyzer
|
||||
.pytype/
|
||||
|
||||
# Cython debug symbols
|
||||
cython_debug/
|
||||
|
||||
# PyCharm
|
||||
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
|
||||
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
|
||||
# and can be added to the global gitignore or merged into this file. For a more nuclear
|
||||
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
|
||||
# .idea/
|
||||
|
||||
# Abstra
|
||||
# Abstra is an AI-powered process automation framework.
|
||||
# Ignore directories containing user credentials, local state, and settings.
|
||||
# Learn more at https://abstra.io/docs
|
||||
.abstra/
|
||||
|
||||
# Visual Studio Code
|
||||
# Visual Studio Code specific template is maintained in a separate VisualStudioCode.gitignore
|
||||
# that can be found at https://github.com/github/gitignore/blob/main/Global/VisualStudioCode.gitignore
|
||||
# and can be added to the global gitignore or merged into this file. However, if you prefer,
|
||||
# you could uncomment the following to ignore the entire vscode folder
|
||||
# .vscode/
|
||||
|
||||
# Ruff stuff:
|
||||
.ruff_cache/
|
||||
|
||||
# PyPI configuration file
|
||||
.pypirc
|
||||
|
||||
# Marimo
|
||||
marimo/_static/
|
||||
marimo/_lsp/
|
||||
__marimo__/
|
||||
|
||||
# Streamlit
|
||||
.streamlit/secrets.toml
|
||||
|
||||
# Cache
|
||||
**/data_cache/
|
||||
eval_results/
|
||||
# Byte-compiled / optimized / DLL files
|
||||
__pycache__/
|
||||
*.py[codz]
|
||||
*$py.class
|
||||
|
||||
# C extensions
|
||||
*.so
|
||||
|
||||
# Distribution / packaging
|
||||
.Python
|
||||
build/
|
||||
develop-eggs/
|
||||
dist/
|
||||
downloads/
|
||||
eggs/
|
||||
.eggs/
|
||||
lib/
|
||||
lib64/
|
||||
parts/
|
||||
sdist/
|
||||
var/
|
||||
wheels/
|
||||
share/python-wheels/
|
||||
*.egg-info/
|
||||
.installed.cfg
|
||||
*.egg
|
||||
MANIFEST
|
||||
|
||||
# PyInstaller
|
||||
# Usually these files are written by a python script from a template
|
||||
# before PyInstaller builds the exe, so as to inject date/other infos into it.
|
||||
*.manifest
|
||||
*.spec
|
||||
|
||||
# Installer logs
|
||||
pip-log.txt
|
||||
pip-delete-this-directory.txt
|
||||
|
||||
# Unit test / coverage reports
|
||||
htmlcov/
|
||||
.tox/
|
||||
.nox/
|
||||
.coverage
|
||||
.coverage.*
|
||||
.cache
|
||||
nosetests.xml
|
||||
coverage.xml
|
||||
*.cover
|
||||
*.py.cover
|
||||
.hypothesis/
|
||||
.pytest_cache/
|
||||
cover/
|
||||
|
||||
# Translations
|
||||
*.mo
|
||||
*.pot
|
||||
|
||||
# Django stuff:
|
||||
*.log
|
||||
local_settings.py
|
||||
db.sqlite3
|
||||
db.sqlite3-journal
|
||||
|
||||
# Flask stuff:
|
||||
instance/
|
||||
.webassets-cache
|
||||
|
||||
# Scrapy stuff:
|
||||
.scrapy
|
||||
|
||||
# Sphinx documentation
|
||||
docs/_build/
|
||||
|
||||
# PyBuilder
|
||||
.pybuilder/
|
||||
target/
|
||||
|
||||
# Jupyter Notebook
|
||||
.ipynb_checkpoints
|
||||
|
||||
# IPython
|
||||
profile_default/
|
||||
ipython_config.py
|
||||
|
||||
# pyenv
|
||||
# For a library or package, you might want to ignore these files since the code is
|
||||
# intended to run in multiple environments; otherwise, check them in:
|
||||
# .python-version
|
||||
|
||||
# pipenv
|
||||
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
|
||||
# However, in case of collaboration, if having platform-specific dependencies or dependencies
|
||||
# having no cross-platform support, pipenv may install dependencies that don't work, or not
|
||||
# install all needed dependencies.
|
||||
# Pipfile.lock
|
||||
|
||||
# UV
|
||||
# Similar to Pipfile.lock, it is generally recommended to include uv.lock in version control.
|
||||
# This is especially recommended for binary packages to ensure reproducibility, and is more
|
||||
# commonly ignored for libraries.
|
||||
# uv.lock
|
||||
|
||||
# poetry
|
||||
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
|
||||
# This is especially recommended for binary packages to ensure reproducibility, and is more
|
||||
# commonly ignored for libraries.
|
||||
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
|
||||
# poetry.lock
|
||||
# poetry.toml
|
||||
|
||||
# pdm
|
||||
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
|
||||
# pdm recommends including project-wide configuration in pdm.toml, but excluding .pdm-python.
|
||||
# https://pdm-project.org/en/latest/usage/project/#working-with-version-control
|
||||
# pdm.lock
|
||||
# pdm.toml
|
||||
.pdm-python
|
||||
.pdm-build/
|
||||
|
||||
# pixi
|
||||
# Similar to Pipfile.lock, it is generally recommended to include pixi.lock in version control.
|
||||
# pixi.lock
|
||||
# Pixi creates a virtual environment in the .pixi directory, just like venv module creates one
|
||||
# in the .venv directory. It is recommended not to include this directory in version control.
|
||||
.pixi
|
||||
|
||||
# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
|
||||
__pypackages__/
|
||||
|
||||
# Celery stuff
|
||||
celerybeat-schedule
|
||||
celerybeat.pid
|
||||
|
||||
# Redis
|
||||
*.rdb
|
||||
*.aof
|
||||
*.pid
|
||||
|
||||
# RabbitMQ
|
||||
mnesia/
|
||||
rabbitmq/
|
||||
rabbitmq-data/
|
||||
|
||||
# ActiveMQ
|
||||
activemq-data/
|
||||
|
||||
# SageMath parsed files
|
||||
*.sage.py
|
||||
|
||||
# Environments
|
||||
.env
|
||||
.envrc
|
||||
.venv
|
||||
env/
|
||||
venv/
|
||||
ENV/
|
||||
env.bak/
|
||||
venv.bak/
|
||||
|
||||
# Spyder project settings
|
||||
.spyderproject
|
||||
.spyproject
|
||||
|
||||
# Rope project settings
|
||||
.ropeproject
|
||||
|
||||
# mkdocs documentation
|
||||
/site
|
||||
|
||||
# mypy
|
||||
.mypy_cache/
|
||||
.dmypy.json
|
||||
dmypy.json
|
||||
|
||||
# Pyre type checker
|
||||
.pyre/
|
||||
|
||||
# pytype static type analyzer
|
||||
.pytype/
|
||||
|
||||
# Cython debug symbols
|
||||
cython_debug/
|
||||
|
||||
# PyCharm
|
||||
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
|
||||
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
|
||||
# and can be added to the global gitignore or merged into this file. For a more nuclear
|
||||
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
|
||||
# .idea/
|
||||
|
||||
# Abstra
|
||||
# Abstra is an AI-powered process automation framework.
|
||||
# Ignore directories containing user credentials, local state, and settings.
|
||||
# Learn more at https://abstra.io/docs
|
||||
.abstra/
|
||||
|
||||
# Visual Studio Code
|
||||
# Visual Studio Code specific template is maintained in a separate VisualStudioCode.gitignore
|
||||
# that can be found at https://github.com/github/gitignore/blob/main/Global/VisualStudioCode.gitignore
|
||||
# and can be added to the global gitignore or merged into this file. However, if you prefer,
|
||||
# you could uncomment the following to ignore the entire vscode folder
|
||||
# .vscode/
|
||||
|
||||
# Ruff stuff:
|
||||
.ruff_cache/
|
||||
|
||||
# PyPI configuration file
|
||||
.pypirc
|
||||
|
||||
# Marimo
|
||||
marimo/_static/
|
||||
marimo/_lsp/
|
||||
__marimo__/
|
||||
|
||||
# Streamlit
|
||||
.streamlit/secrets.toml
|
||||
|
||||
# Cache
|
||||
**/data_cache/
|
||||
eval_results/
|
||||
.env.railway
|
||||
|
|
|
|||
|
|
@ -1,4 +1,4 @@
|
|||
.git
|
||||
__pycache__
|
||||
*.pyc
|
||||
.env
|
||||
.git
|
||||
__pycache__
|
||||
*.pyc
|
||||
.env
|
||||
|
|
|
|||
402
LICENSE
402
LICENSE
|
|
@ -1,201 +1,201 @@
|
|||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
|
|
|
|||
438
README.md
438
README.md
|
|
@ -1,219 +1,219 @@
|
|||
<p align="center">
|
||||
<img src="assets/TauricResearch.png" style="width: 60%; height: auto;">
|
||||
</p>
|
||||
|
||||
<div align="center" style="line-height: 1;">
|
||||
<a href="https://arxiv.org/abs/2412.20138" target="_blank"><img alt="arXiv" src="https://img.shields.io/badge/arXiv-2412.20138-B31B1B?logo=arxiv"/></a>
|
||||
<a href="https://discord.com/invite/hk9PGKShPK" target="_blank"><img alt="Discord" src="https://img.shields.io/badge/Discord-TradingResearch-7289da?logo=discord&logoColor=white&color=7289da"/></a>
|
||||
<a href="./assets/wechat.png" target="_blank"><img alt="WeChat" src="https://img.shields.io/badge/WeChat-TauricResearch-brightgreen?logo=wechat&logoColor=white"/></a>
|
||||
<a href="https://x.com/TauricResearch" target="_blank"><img alt="X Follow" src="https://img.shields.io/badge/X-TauricResearch-white?logo=x&logoColor=white"/></a>
|
||||
<br>
|
||||
<a href="https://github.com/TauricResearch/" target="_blank"><img alt="Community" src="https://img.shields.io/badge/Join_GitHub_Community-TauricResearch-14C290?logo=discourse"/></a>
|
||||
</div>
|
||||
|
||||
<div align="center">
|
||||
<!-- Keep these links. Translations will automatically update with the README. -->
|
||||
<a href="https://www.readme-i18n.com/TauricResearch/TradingAgents?lang=de">Deutsch</a> |
|
||||
<a href="https://www.readme-i18n.com/TauricResearch/TradingAgents?lang=es">Español</a> |
|
||||
<a href="https://www.readme-i18n.com/TauricResearch/TradingAgents?lang=fr">français</a> |
|
||||
<a href="https://www.readme-i18n.com/TauricResearch/TradingAgents?lang=ja">日本語</a> |
|
||||
<a href="https://www.readme-i18n.com/TauricResearch/TradingAgents?lang=ko">한국어</a> |
|
||||
<a href="https://www.readme-i18n.com/TauricResearch/TradingAgents?lang=pt">Português</a> |
|
||||
<a href="https://www.readme-i18n.com/TauricResearch/TradingAgents?lang=ru">Русский</a> |
|
||||
<a href="https://www.readme-i18n.com/TauricResearch/TradingAgents?lang=zh">中文</a>
|
||||
</div>
|
||||
|
||||
---
|
||||
|
||||
# TradingAgents: Multi-Agents LLM Financial Trading Framework
|
||||
|
||||
## News
|
||||
- [2026-02] **TradingAgents v0.2.0** released with multi-provider LLM support (GPT-5.x, Gemini 3.x, Claude 4.x, Grok 4.x) and improved system architecture.
|
||||
- [2026-01] **Trading-R1** [Technical Report](https://arxiv.org/abs/2509.11420) released, with [Terminal](https://github.com/TauricResearch/Trading-R1) expected to land soon.
|
||||
|
||||
<div align="center">
|
||||
<a href="https://www.star-history.com/#TauricResearch/TradingAgents&Date">
|
||||
<picture>
|
||||
<source media="(prefers-color-scheme: dark)" srcset="https://api.star-history.com/svg?repos=TauricResearch/TradingAgents&type=Date&theme=dark" />
|
||||
<source media="(prefers-color-scheme: light)" srcset="https://api.star-history.com/svg?repos=TauricResearch/TradingAgents&type=Date" />
|
||||
<img alt="TradingAgents Star History" src="https://api.star-history.com/svg?repos=TauricResearch/TradingAgents&type=Date" style="width: 80%; height: auto;" />
|
||||
</picture>
|
||||
</a>
|
||||
</div>
|
||||
|
||||
> 🎉 **TradingAgents** officially released! We have received numerous inquiries about the work, and we would like to express our thanks for the enthusiasm in our community.
|
||||
>
|
||||
> So we decided to fully open-source the framework. Looking forward to building impactful projects with you!
|
||||
|
||||
<div align="center">
|
||||
|
||||
🚀 [TradingAgents](#tradingagents-framework) | ⚡ [Installation & CLI](#installation-and-cli) | 🎬 [Demo](https://www.youtube.com/watch?v=90gr5lwjIho) | 📦 [Package Usage](#tradingagents-package) | 🤝 [Contributing](#contributing) | 📄 [Citation](#citation)
|
||||
|
||||
</div>
|
||||
|
||||
## TradingAgents Framework
|
||||
|
||||
TradingAgents is a multi-agent trading framework that mirrors the dynamics of real-world trading firms. By deploying specialized LLM-powered agents: from fundamental analysts, sentiment experts, and technical analysts, to trader, risk management team, the platform collaboratively evaluates market conditions and informs trading decisions. Moreover, these agents engage in dynamic discussions to pinpoint the optimal strategy.
|
||||
|
||||
<p align="center">
|
||||
<img src="assets/schema.png" style="width: 100%; height: auto;">
|
||||
</p>
|
||||
|
||||
> TradingAgents framework is designed for research purposes. Trading performance may vary based on many factors, including the chosen backbone language models, model temperature, trading periods, the quality of data, and other non-deterministic factors. [It is not intended as financial, investment, or trading advice.](https://tauric.ai/disclaimer/)
|
||||
|
||||
Our framework decomposes complex trading tasks into specialized roles. This ensures the system achieves a robust, scalable approach to market analysis and decision-making.
|
||||
|
||||
### Analyst Team
|
||||
- Fundamentals Analyst: Evaluates company financials and performance metrics, identifying intrinsic values and potential red flags.
|
||||
- Sentiment Analyst: Analyzes social media and public sentiment using sentiment scoring algorithms to gauge short-term market mood.
|
||||
- News Analyst: Monitors global news and macroeconomic indicators, interpreting the impact of events on market conditions.
|
||||
- Technical Analyst: Utilizes technical indicators (like MACD and RSI) to detect trading patterns and forecast price movements.
|
||||
|
||||
<p align="center">
|
||||
<img src="assets/analyst.png" width="100%" style="display: inline-block; margin: 0 2%;">
|
||||
</p>
|
||||
|
||||
### Researcher Team
|
||||
- Comprises both bullish and bearish researchers who critically assess the insights provided by the Analyst Team. Through structured debates, they balance potential gains against inherent risks.
|
||||
|
||||
<p align="center">
|
||||
<img src="assets/researcher.png" width="70%" style="display: inline-block; margin: 0 2%;">
|
||||
</p>
|
||||
|
||||
### Trader Agent
|
||||
- Composes reports from the analysts and researchers to make informed trading decisions. It determines the timing and magnitude of trades based on comprehensive market insights.
|
||||
|
||||
<p align="center">
|
||||
<img src="assets/trader.png" width="70%" style="display: inline-block; margin: 0 2%;">
|
||||
</p>
|
||||
|
||||
### Risk Management and Portfolio Manager
|
||||
- Continuously evaluates portfolio risk by assessing market volatility, liquidity, and other risk factors. The risk management team evaluates and adjusts trading strategies, providing assessment reports to the Portfolio Manager for final decision.
|
||||
- The Portfolio Manager approves/rejects the transaction proposal. If approved, the order will be sent to the simulated exchange and executed.
|
||||
|
||||
<p align="center">
|
||||
<img src="assets/risk.png" width="70%" style="display: inline-block; margin: 0 2%;">
|
||||
</p>
|
||||
|
||||
## Installation and CLI
|
||||
|
||||
### Installation
|
||||
|
||||
Clone TradingAgents:
|
||||
```bash
|
||||
git clone https://github.com/TauricResearch/TradingAgents.git
|
||||
cd TradingAgents
|
||||
```
|
||||
|
||||
Create a virtual environment in any of your favorite environment managers:
|
||||
```bash
|
||||
conda create -n tradingagents python=3.13
|
||||
conda activate tradingagents
|
||||
```
|
||||
|
||||
Install dependencies:
|
||||
```bash
|
||||
pip install -r requirements.txt
|
||||
```
|
||||
|
||||
### Required APIs
|
||||
|
||||
TradingAgents supports multiple LLM providers. Set the API key for your chosen provider:
|
||||
|
||||
```bash
|
||||
export OPENAI_API_KEY=... # OpenAI (GPT)
|
||||
export GOOGLE_API_KEY=... # Google (Gemini)
|
||||
export ANTHROPIC_API_KEY=... # Anthropic (Claude)
|
||||
export XAI_API_KEY=... # xAI (Grok)
|
||||
export OPENROUTER_API_KEY=... # OpenRouter
|
||||
export ALPHA_VANTAGE_API_KEY=... # Alpha Vantage
|
||||
```
|
||||
|
||||
For local models, configure Ollama with `llm_provider: "ollama"` in your config.
|
||||
|
||||
Alternatively, copy `.env.example` to `.env` and fill in your keys:
|
||||
```bash
|
||||
cp .env.example .env
|
||||
```
|
||||
|
||||
### CLI Usage
|
||||
|
||||
You can also try out the CLI directly by running:
|
||||
```bash
|
||||
python -m cli.main
|
||||
```
|
||||
You will see a screen where you can select your desired tickers, date, LLMs, research depth, etc.
|
||||
|
||||
<p align="center">
|
||||
<img src="assets/cli/cli_init.png" width="100%" style="display: inline-block; margin: 0 2%;">
|
||||
</p>
|
||||
|
||||
An interface will appear showing results as they load, letting you track the agent's progress as it runs.
|
||||
|
||||
<p align="center">
|
||||
<img src="assets/cli/cli_news.png" width="100%" style="display: inline-block; margin: 0 2%;">
|
||||
</p>
|
||||
|
||||
<p align="center">
|
||||
<img src="assets/cli/cli_transaction.png" width="100%" style="display: inline-block; margin: 0 2%;">
|
||||
</p>
|
||||
|
||||
## TradingAgents Package
|
||||
|
||||
### Implementation Details
|
||||
|
||||
We built TradingAgents with LangGraph to ensure flexibility and modularity. The framework supports multiple LLM providers: OpenAI, Google, Anthropic, xAI, OpenRouter, and Ollama.
|
||||
|
||||
### Python Usage
|
||||
|
||||
To use TradingAgents inside your code, you can import the `tradingagents` module and initialize a `TradingAgentsGraph()` object. The `.propagate()` function will return a decision. You can run `main.py` directly; here is a quick example:
|
||||
|
||||
```python
|
||||
from tradingagents.graph.trading_graph import TradingAgentsGraph
|
||||
from tradingagents.default_config import DEFAULT_CONFIG
|
||||
|
||||
ta = TradingAgentsGraph(debug=True, config=DEFAULT_CONFIG.copy())
|
||||
|
||||
# forward propagate
|
||||
_, decision = ta.propagate("NVDA", "2026-01-15")
|
||||
print(decision)
|
||||
```
|
||||
|
||||
You can also adjust the default configuration to set your own choice of LLMs, debate rounds, etc.
|
||||
|
||||
```python
|
||||
from tradingagents.graph.trading_graph import TradingAgentsGraph
|
||||
from tradingagents.default_config import DEFAULT_CONFIG
|
||||
|
||||
config = DEFAULT_CONFIG.copy()
|
||||
config["llm_provider"] = "openai" # openai, google, anthropic, xai, openrouter, ollama
|
||||
config["deep_think_llm"] = "gpt-5.2" # Model for complex reasoning
|
||||
config["quick_think_llm"] = "gpt-5-mini" # Model for quick tasks
|
||||
config["max_debate_rounds"] = 2
|
||||
|
||||
ta = TradingAgentsGraph(debug=True, config=config)
|
||||
_, decision = ta.propagate("NVDA", "2026-01-15")
|
||||
print(decision)
|
||||
```
|
||||
|
||||
See `tradingagents/default_config.py` for all configuration options.
|
||||
|
||||
## Contributing
|
||||
|
||||
We welcome contributions from the community! Whether it's fixing a bug, improving documentation, or suggesting a new feature, your input helps make this project better. If you are interested in this line of research, please consider joining our open-source financial AI research community [Tauric Research](https://tauric.ai/).
|
||||
|
||||
## Citation
|
||||
|
||||
Please reference our work if you find *TradingAgents* provides you with some help :)
|
||||
|
||||
```
|
||||
@misc{xiao2025tradingagentsmultiagentsllmfinancial,
|
||||
title={TradingAgents: Multi-Agents LLM Financial Trading Framework},
|
||||
author={Yijia Xiao and Edward Sun and Di Luo and Wei Wang},
|
||||
year={2025},
|
||||
eprint={2412.20138},
|
||||
archivePrefix={arXiv},
|
||||
primaryClass={q-fin.TR},
|
||||
url={https://arxiv.org/abs/2412.20138},
|
||||
}
|
||||
```
|
||||
<p align="center">
|
||||
<img src="assets/TauricResearch.png" style="width: 60%; height: auto;">
|
||||
</p>
|
||||
|
||||
<div align="center" style="line-height: 1;">
|
||||
<a href="https://arxiv.org/abs/2412.20138" target="_blank"><img alt="arXiv" src="https://img.shields.io/badge/arXiv-2412.20138-B31B1B?logo=arxiv"/></a>
|
||||
<a href="https://discord.com/invite/hk9PGKShPK" target="_blank"><img alt="Discord" src="https://img.shields.io/badge/Discord-TradingResearch-7289da?logo=discord&logoColor=white&color=7289da"/></a>
|
||||
<a href="./assets/wechat.png" target="_blank"><img alt="WeChat" src="https://img.shields.io/badge/WeChat-TauricResearch-brightgreen?logo=wechat&logoColor=white"/></a>
|
||||
<a href="https://x.com/TauricResearch" target="_blank"><img alt="X Follow" src="https://img.shields.io/badge/X-TauricResearch-white?logo=x&logoColor=white"/></a>
|
||||
<br>
|
||||
<a href="https://github.com/TauricResearch/" target="_blank"><img alt="Community" src="https://img.shields.io/badge/Join_GitHub_Community-TauricResearch-14C290?logo=discourse"/></a>
|
||||
</div>
|
||||
|
||||
<div align="center">
|
||||
<!-- Keep these links. Translations will automatically update with the README. -->
|
||||
<a href="https://www.readme-i18n.com/TauricResearch/TradingAgents?lang=de">Deutsch</a> |
|
||||
<a href="https://www.readme-i18n.com/TauricResearch/TradingAgents?lang=es">Español</a> |
|
||||
<a href="https://www.readme-i18n.com/TauricResearch/TradingAgents?lang=fr">français</a> |
|
||||
<a href="https://www.readme-i18n.com/TauricResearch/TradingAgents?lang=ja">日本語</a> |
|
||||
<a href="https://www.readme-i18n.com/TauricResearch/TradingAgents?lang=ko">한국어</a> |
|
||||
<a href="https://www.readme-i18n.com/TauricResearch/TradingAgents?lang=pt">Português</a> |
|
||||
<a href="https://www.readme-i18n.com/TauricResearch/TradingAgents?lang=ru">Русский</a> |
|
||||
<a href="https://www.readme-i18n.com/TauricResearch/TradingAgents?lang=zh">中文</a>
|
||||
</div>
|
||||
|
||||
---
|
||||
|
||||
# TradingAgents: Multi-Agents LLM Financial Trading Framework
|
||||
|
||||
## News
|
||||
- [2026-02] **TradingAgents v0.2.0** released with multi-provider LLM support (GPT-5.x, Gemini 3.x, Claude 4.x, Grok 4.x) and improved system architecture.
|
||||
- [2026-01] **Trading-R1** [Technical Report](https://arxiv.org/abs/2509.11420) released, with [Terminal](https://github.com/TauricResearch/Trading-R1) expected to land soon.
|
||||
|
||||
<div align="center">
|
||||
<a href="https://www.star-history.com/#TauricResearch/TradingAgents&Date">
|
||||
<picture>
|
||||
<source media="(prefers-color-scheme: dark)" srcset="https://api.star-history.com/svg?repos=TauricResearch/TradingAgents&type=Date&theme=dark" />
|
||||
<source media="(prefers-color-scheme: light)" srcset="https://api.star-history.com/svg?repos=TauricResearch/TradingAgents&type=Date" />
|
||||
<img alt="TradingAgents Star History" src="https://api.star-history.com/svg?repos=TauricResearch/TradingAgents&type=Date" style="width: 80%; height: auto;" />
|
||||
</picture>
|
||||
</a>
|
||||
</div>
|
||||
|
||||
> 🎉 **TradingAgents** officially released! We have received numerous inquiries about the work, and we would like to express our thanks for the enthusiasm in our community.
|
||||
>
|
||||
> So we decided to fully open-source the framework. Looking forward to building impactful projects with you!
|
||||
|
||||
<div align="center">
|
||||
|
||||
🚀 [TradingAgents](#tradingagents-framework) | ⚡ [Installation & CLI](#installation-and-cli) | 🎬 [Demo](https://www.youtube.com/watch?v=90gr5lwjIho) | 📦 [Package Usage](#tradingagents-package) | 🤝 [Contributing](#contributing) | 📄 [Citation](#citation)
|
||||
|
||||
</div>
|
||||
|
||||
## TradingAgents Framework
|
||||
|
||||
TradingAgents is a multi-agent trading framework that mirrors the dynamics of real-world trading firms. By deploying specialized LLM-powered agents: from fundamental analysts, sentiment experts, and technical analysts, to trader, risk management team, the platform collaboratively evaluates market conditions and informs trading decisions. Moreover, these agents engage in dynamic discussions to pinpoint the optimal strategy.
|
||||
|
||||
<p align="center">
|
||||
<img src="assets/schema.png" style="width: 100%; height: auto;">
|
||||
</p>
|
||||
|
||||
> TradingAgents framework is designed for research purposes. Trading performance may vary based on many factors, including the chosen backbone language models, model temperature, trading periods, the quality of data, and other non-deterministic factors. [It is not intended as financial, investment, or trading advice.](https://tauric.ai/disclaimer/)
|
||||
|
||||
Our framework decomposes complex trading tasks into specialized roles. This ensures the system achieves a robust, scalable approach to market analysis and decision-making.
|
||||
|
||||
### Analyst Team
|
||||
- Fundamentals Analyst: Evaluates company financials and performance metrics, identifying intrinsic values and potential red flags.
|
||||
- Sentiment Analyst: Analyzes social media and public sentiment using sentiment scoring algorithms to gauge short-term market mood.
|
||||
- News Analyst: Monitors global news and macroeconomic indicators, interpreting the impact of events on market conditions.
|
||||
- Technical Analyst: Utilizes technical indicators (like MACD and RSI) to detect trading patterns and forecast price movements.
|
||||
|
||||
<p align="center">
|
||||
<img src="assets/analyst.png" width="100%" style="display: inline-block; margin: 0 2%;">
|
||||
</p>
|
||||
|
||||
### Researcher Team
|
||||
- Comprises both bullish and bearish researchers who critically assess the insights provided by the Analyst Team. Through structured debates, they balance potential gains against inherent risks.
|
||||
|
||||
<p align="center">
|
||||
<img src="assets/researcher.png" width="70%" style="display: inline-block; margin: 0 2%;">
|
||||
</p>
|
||||
|
||||
### Trader Agent
|
||||
- Composes reports from the analysts and researchers to make informed trading decisions. It determines the timing and magnitude of trades based on comprehensive market insights.
|
||||
|
||||
<p align="center">
|
||||
<img src="assets/trader.png" width="70%" style="display: inline-block; margin: 0 2%;">
|
||||
</p>
|
||||
|
||||
### Risk Management and Portfolio Manager
|
||||
- Continuously evaluates portfolio risk by assessing market volatility, liquidity, and other risk factors. The risk management team evaluates and adjusts trading strategies, providing assessment reports to the Portfolio Manager for final decision.
|
||||
- The Portfolio Manager approves/rejects the transaction proposal. If approved, the order will be sent to the simulated exchange and executed.
|
||||
|
||||
<p align="center">
|
||||
<img src="assets/risk.png" width="70%" style="display: inline-block; margin: 0 2%;">
|
||||
</p>
|
||||
|
||||
## Installation and CLI
|
||||
|
||||
### Installation
|
||||
|
||||
Clone TradingAgents:
|
||||
```bash
|
||||
git clone https://github.com/TauricResearch/TradingAgents.git
|
||||
cd TradingAgents
|
||||
```
|
||||
|
||||
Create a virtual environment in any of your favorite environment managers:
|
||||
```bash
|
||||
conda create -n tradingagents python=3.13
|
||||
conda activate tradingagents
|
||||
```
|
||||
|
||||
Install dependencies:
|
||||
```bash
|
||||
pip install -r requirements.txt
|
||||
```
|
||||
|
||||
### Required APIs
|
||||
|
||||
TradingAgents supports multiple LLM providers. Set the API key for your chosen provider:
|
||||
|
||||
```bash
|
||||
export OPENAI_API_KEY=... # OpenAI (GPT)
|
||||
export GOOGLE_API_KEY=... # Google (Gemini)
|
||||
export ANTHROPIC_API_KEY=... # Anthropic (Claude)
|
||||
export XAI_API_KEY=... # xAI (Grok)
|
||||
export OPENROUTER_API_KEY=... # OpenRouter
|
||||
export ALPHA_VANTAGE_API_KEY=... # Alpha Vantage
|
||||
```
|
||||
|
||||
For local models, configure Ollama with `llm_provider: "ollama"` in your config.
|
||||
|
||||
Alternatively, copy `.env.example` to `.env` and fill in your keys:
|
||||
```bash
|
||||
cp .env.example .env
|
||||
```
|
||||
|
||||
### CLI Usage
|
||||
|
||||
You can also try out the CLI directly by running:
|
||||
```bash
|
||||
python -m cli.main
|
||||
```
|
||||
You will see a screen where you can select your desired tickers, date, LLMs, research depth, etc.
|
||||
|
||||
<p align="center">
|
||||
<img src="assets/cli/cli_init.png" width="100%" style="display: inline-block; margin: 0 2%;">
|
||||
</p>
|
||||
|
||||
An interface will appear showing results as they load, letting you track the agent's progress as it runs.
|
||||
|
||||
<p align="center">
|
||||
<img src="assets/cli/cli_news.png" width="100%" style="display: inline-block; margin: 0 2%;">
|
||||
</p>
|
||||
|
||||
<p align="center">
|
||||
<img src="assets/cli/cli_transaction.png" width="100%" style="display: inline-block; margin: 0 2%;">
|
||||
</p>
|
||||
|
||||
## TradingAgents Package
|
||||
|
||||
### Implementation Details
|
||||
|
||||
We built TradingAgents with LangGraph to ensure flexibility and modularity. The framework supports multiple LLM providers: OpenAI, Google, Anthropic, xAI, OpenRouter, and Ollama.
|
||||
|
||||
### Python Usage
|
||||
|
||||
To use TradingAgents inside your code, you can import the `tradingagents` module and initialize a `TradingAgentsGraph()` object. The `.propagate()` function will return a decision. You can run `main.py` directly; here is a quick example:
|
||||
|
||||
```python
|
||||
from tradingagents.graph.trading_graph import TradingAgentsGraph
|
||||
from tradingagents.default_config import DEFAULT_CONFIG
|
||||
|
||||
ta = TradingAgentsGraph(debug=True, config=DEFAULT_CONFIG.copy())
|
||||
|
||||
# forward propagate
|
||||
_, decision = ta.propagate("NVDA", "2026-01-15")
|
||||
print(decision)
|
||||
```
|
||||
|
||||
You can also adjust the default configuration to set your own choice of LLMs, debate rounds, etc.
|
||||
|
||||
```python
|
||||
from tradingagents.graph.trading_graph import TradingAgentsGraph
|
||||
from tradingagents.default_config import DEFAULT_CONFIG
|
||||
|
||||
config = DEFAULT_CONFIG.copy()
|
||||
config["llm_provider"] = "openai" # openai, google, anthropic, xai, openrouter, ollama
|
||||
config["deep_think_llm"] = "gpt-5.2" # Model for complex reasoning
|
||||
config["quick_think_llm"] = "gpt-5-mini" # Model for quick tasks
|
||||
config["max_debate_rounds"] = 2
|
||||
|
||||
ta = TradingAgentsGraph(debug=True, config=config)
|
||||
_, decision = ta.propagate("NVDA", "2026-01-15")
|
||||
print(decision)
|
||||
```
|
||||
|
||||
See `tradingagents/default_config.py` for all configuration options.
|
||||
|
||||
## Contributing
|
||||
|
||||
We welcome contributions from the community! Whether it's fixing a bug, improving documentation, or suggesting a new feature, your input helps make this project better. If you are interested in this line of research, please consider joining our open-source financial AI research community [Tauric Research](https://tauric.ai/).
|
||||
|
||||
## Citation
|
||||
|
||||
Please reference our work if you find *TradingAgents* provides you with some help :)
|
||||
|
||||
```
|
||||
@misc{xiao2025tradingagentsmultiagentsllmfinancial,
|
||||
title={TradingAgents: Multi-Agents LLM Financial Trading Framework},
|
||||
author={Yijia Xiao and Edward Sun and Di Luo and Wei Wang},
|
||||
year={2025},
|
||||
eprint={2412.20138},
|
||||
archivePrefix={arXiv},
|
||||
primaryClass={q-fin.TR},
|
||||
url={https://arxiv.org/abs/2412.20138},
|
||||
}
|
||||
```
|
||||
|
|
|
|||
984
app.py
984
app.py
|
|
@ -1,492 +1,492 @@
|
|||
"""FastAPI SSE backend for the structured equity ranking engine."""
|
||||
|
||||
from pathlib import Path
|
||||
from dotenv import load_dotenv
|
||||
load_dotenv(Path(__file__).parent / ".env")
|
||||
|
||||
import logging
|
||||
import os
|
||||
import re
|
||||
import time
|
||||
import uuid
|
||||
import asyncio
|
||||
import json
|
||||
import traceback as _tb
|
||||
from datetime import date
|
||||
|
||||
from fastapi import FastAPI, HTTPException, Request, Depends
|
||||
|
||||
logging.basicConfig(
|
||||
level=logging.INFO,
|
||||
format="%(asctime)s %(levelname)s %(name)s %(message)s",
|
||||
)
|
||||
logger = logging.getLogger(__name__)
|
||||
from fastapi.middleware.cors import CORSMiddleware
|
||||
from pydantic import BaseModel
|
||||
from sse_starlette.sse import EventSourceResponse
|
||||
|
||||
# If using Groq (or other OpenAI-compatible), set OPENAI_API_KEY for langchain
|
||||
if not os.environ.get("OPENAI_API_KEY"):
|
||||
groq_key = os.environ.get("GROQ_API_KEY", "")
|
||||
if groq_key:
|
||||
os.environ["OPENAI_API_KEY"] = groq_key
|
||||
|
||||
from tradingagents.graph.trading_graph import TradingAgentsGraph
|
||||
from tradingagents.default_config import DEFAULT_CONFIG
|
||||
|
||||
app = FastAPI(title="TradingAgents Structured Pipeline")

# --- CORS ---
# CORS_ORIGINS is a comma-separated allowlist of origins; when it is unset
# or empty the wildcard default allows every origin.
_cors_env = os.getenv("CORS_ORIGINS", "")
if _cors_env:
    _cors_origins = [origin.strip() for origin in _cors_env.split(",") if origin.strip()]
else:
    _cors_origins = ["*"]
app.add_middleware(
    CORSMiddleware,
    allow_origins=_cors_origins,
    allow_methods=["*"],
    allow_headers=["*"],
)

# --- Auth ---
# Bearer token checked by verify_api_key; an empty value disables auth.
_API_KEY = os.getenv("AGENTS_API_KEY", "")
|
||||
|
||||
|
||||
async def verify_api_key(request: Request):
    """FastAPI dependency enforcing optional bearer-token authentication.

    When AGENTS_API_KEY is unset, authentication is disabled and every
    request is allowed. Otherwise the request's Authorization header must
    be exactly ``Bearer <key>``.

    Raises:
        HTTPException: 401 when the header is missing or does not match.
    """
    if not _API_KEY:
        return
    import hmac  # local import so the module's import header is unchanged

    auth = request.headers.get("Authorization", "")
    # hmac.compare_digest runs in constant time, so response latency cannot
    # leak how many leading characters of the key an attacker has guessed
    # (the previous `!=` comparison short-circuits on the first mismatch).
    # Comparing bytes avoids compare_digest's ASCII-only restriction on str.
    if not hmac.compare_digest(auth.encode("utf-8"), f"Bearer {_API_KEY}".encode("utf-8")):
        raise HTTPException(401, "Invalid or missing API key")
|
||||
|
||||
|
||||
# --- Concurrency ---
# Upper bound on analyses running at once; each analysis drives a full
# multi-agent graph, so this is the main knob for load / LLM-cost pressure.
MAX_CONCURRENT = int(os.getenv("MAX_CONCURRENT_ANALYSES", "3"))
_semaphore = asyncio.Semaphore(MAX_CONCURRENT)

# --- Event buffer cap ---
# Per-analysis cap on buffered SSE events; _append_event drops the oldest
# entries once this limit is exceeded.
MAX_EVENTS_PER_ANALYSIS = 5000

# In-memory registry of analysis state keyed by id (presumably the uuid
# assigned at submission — confirm against the analyze endpoint). State is
# lost when this process exits.
analyses: dict[str, dict] = {}
|
||||
|
||||
|
||||
def _append_event(state: dict, evt: dict) -> None:
    """Record *evt* on the analysis state, keeping at most the newest
    MAX_EVENTS_PER_ANALYSIS events.

    When the buffer overflows, the oldest entries are discarded by
    rebinding ``state["events"]`` to a fresh, trimmed list.
    """
    buf = state["events"]
    buf.append(evt)
    overflow = len(buf) - MAX_EVENTS_PER_ANALYSIS
    if overflow > 0:
        # Rebind (rather than mutate in place) to match the original
        # trimming semantics: consumers holding the dict see the new list.
        state["events"] = buf[overflow:]
|
||||
|
||||
|
||||
class AnalyzeRequest(BaseModel):
    """Request body for submitting a ticker analysis."""

    ticker: str  # stock symbol to analyze, e.g. "NVDA"
    date: str | None = None  # analysis date; None defers to a server-side default — format handled downstream, TODO confirm ISO
|
||||
|
||||
|
||||
def build_config():
    """Assemble the TradingAgents configuration from environment variables.

    Starts from DEFAULT_CONFIG and overrides the LLM provider/model/backend
    settings, limits debate depth to a single round, and routes every data
    vendor through yfinance. Logs the resulting LLM selection.
    """
    cfg = dict(DEFAULT_CONFIG)
    cfg["llm_provider"] = os.getenv("LLM_PROVIDER", "openai")
    cfg["deep_think_llm"] = os.getenv("DEEP_THINK_MODEL", "deepseek-v3.1:671b-cloud")
    cfg["quick_think_llm"] = os.getenv("QUICK_THINK_MODEL", "deepseek-v3.1:671b-cloud")
    cfg["backend_url"] = os.getenv("LLM_BASE_URL", "https://ollama.com/v1")
    # Keep the pipeline fast: one round each for research and risk debates.
    cfg["max_debate_rounds"] = 1
    cfg["max_risk_discuss_rounds"] = 1
    # Every data category is served by yfinance.
    cfg["data_vendors"] = {
        category: "yfinance"
        for category in (
            "core_stock_apis",
            "technical_indicators",
            "fundamental_data",
            "news_data",
        )
    }
    logger.info(
        "config_built provider=%s deep=%s quick=%s url=%s",
        cfg["llm_provider"], cfg["deep_think_llm"],
        cfg["quick_think_llm"], cfg["backend_url"],
    )
    return cfg
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
# Stage/agent mapping for SSE events
# ---------------------------------------------------------------------------

# Maps state field → (agent display name, pipeline stage)
# Iteration order matters: _run_analysis_inner scans these in order when
# detecting newly populated state fields.
FIELD_AGENT_MAP = {
    "validation": ("Validation", "validation"),
    "company_card": ("Company Card", "validation"),
    "macro": ("Macro Regime", "tier1"),
    "liquidity": ("Liquidity", "tier1"),
    "business_quality": ("Business Quality", "tier2"),
    "institutional_flow": ("Institutional Flow", "tier2"),
    "valuation": ("Valuation", "tier2"),
    "entry_timing": ("Entry Timing", "tier2"),
    "earnings_revisions": ("Earnings Revisions", "tier2"),
    "sector_rotation": ("Sector Rotation", "tier2"),
    "backlog": ("Backlog / Order Momentum", "tier2"),
    "crowding": ("Narrative Crowding", "tier2"),
    "archetype": ("Archetype", "scoring"),
    "master_score": ("Master Score", "scoring"),
    "theme_substitution": ("Theme Substitution", "portfolio"),
    "position_replacement": ("Position Replacement", "portfolio"),
    "bull_case": ("Bull Researcher", "debate"),
    "bear_case": ("Bear Researcher", "debate"),
    "debate": ("Debate Referee", "debate"),
    "risk": ("Risk / Invalidation", "decision"),
    "final_decision": ("Final Decision", "decision"),
}

# Flattened display names and the ordered list of pipeline stages.
ALL_AGENTS = [name for name, _ in FIELD_AGENT_MAP.values()]
ALL_STAGES = ["validation", "tier1", "tier2", "scoring", "portfolio", "debate", "decision"]
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Analysis runner
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
async def _run_analysis_inner(analysis_id: str, ticker: str, trade_date: str):
    """Core analysis logic — streams structured pipeline state changes as SSE.

    Drives the TradingAgents graph for one ticker/date and converts state
    changes into typed events ("agent_update", "report", "debate", "score",
    "decision", "error"). Each event is appended to the replay buffer via
    _append_event AND pushed onto the live queue; a trailing None sentinel on
    the queue tells the SSE generator to stop.
    """
    state = analyses[analysis_id]
    q = state["queue"]
    config = build_config()

    # Graph construction may fail (bad provider config, missing API key, ...);
    # surface that as a terminal error event instead of crashing the task.
    try:
        graph = TradingAgentsGraph(debug=False, config=config)
        logger.info(
            "analysis_init_ok deep_llm=%s quick_llm=%s analysis_id=%s",
            type(graph.deep_thinking_llm).__name__,
            type(graph.quick_thinking_llm).__name__,
            analysis_id,
        )
    except Exception as e:
        logger.error("analysis_init_failed analysis_id=%s error=%s\n%s", analysis_id, e, _tb.format_exc())
        # NOTE(review): this path skips _append_event and never sets
        # state["done"], so a client reconnecting after an init failure cannot
        # replay the error — confirm whether that is intended.
        await q.put({"type": "error", "message": f"Init failed: {e}"})
        await q.put(None)
        return

    # NOTE(review): _create_initial_state is a private method of the graph —
    # this relies on TradingAgentsGraph internals staying stable.
    init_state = graph._create_initial_state(ticker, trade_date)
    start_time = time.time()
    emitted_fields = set()      # state fields already reported as completed
    prev_agent_statuses = {}    # field -> "pending" | "in_progress" | "completed"
    final_state = None

    # Emit initial status: all agents pending
    for field, (agent_name, stage) in FIELD_AGENT_MAP.items():
        prev_agent_statuses[field] = "pending"
        evt = {
            "type": "agent_update",
            "agent": agent_name,
            "stage": stage,
            "status": "pending",
            "stats": _stats(start_time, emitted_fields),
        }
        _append_event(state, evt)
        await q.put(evt)

    try:
        # stream_mode="values" yields the full accumulated state each step,
        # so a field stays visible once any node has populated it.
        async for chunk in graph.graph.astream(
            init_state,
            stream_mode="values",
            config={"recursion_limit": 25},
        ):
            final_state = chunk

            # Detect newly populated fields
            for field, (agent_name, stage) in FIELD_AGENT_MAP.items():
                if field in emitted_fields:
                    continue

                value = chunk.get(field)
                if value is None:
                    continue

                emitted_fields.add(field)
                st = _stats(start_time, emitted_fields)

                # Mark this agent completed
                prev_agent_statuses[field] = "completed"
                evt = {
                    "type": "agent_update",
                    "agent": agent_name,
                    "stage": stage,
                    "status": "completed",
                    "stats": st,
                }
                _append_event(state, evt)
                await q.put(evt)

                # Emit report data for key fields
                if field in ("validation", "company_card"):
                    evt = {
                        "type": "report",
                        "agent": agent_name,
                        "stage": stage,
                        "field": field,
                        "report": _format_report(field, value),
                        "stats": st,
                    }
                    _append_event(state, evt)
                    await q.put(evt)

                elif field == "debate":
                    # Bundle both researcher theses with the referee verdict.
                    bull = chunk.get("bull_case") or {}
                    bear = chunk.get("bear_case") or {}
                    evt = {
                        "type": "debate",
                        "stage": "debate",
                        "bull": bull.get("thesis", ""),
                        "bear": bear.get("thesis", ""),
                        "judge": (value or {}).get("reasoning", ""),
                        "winner": (value or {}).get("winner", ""),
                        "stats": st,
                    }
                    _append_event(state, evt)
                    await q.put(evt)

                elif field == "master_score":
                    evt = {
                        "type": "score",
                        "stage": "scoring",
                        "master_score": value,
                        "adjusted_score": chunk.get("adjusted_score"),
                        "position_role": chunk.get("position_role"),
                        "stats": st,
                    }
                    _append_event(state, evt)
                    await q.put(evt)

            # Mark in-progress agents for upcoming stages
            await _update_in_progress(chunk, emitted_fields, prev_agent_statuses, state, q, start_time)

    except Exception as e:
        # Any streaming failure terminates the analysis with an error event.
        logger.error("analysis_stream_error analysis_id=%s error=%s\n%s", analysis_id, e, _tb.format_exc())
        evt = {"type": "error", "message": str(e)}
        _append_event(state, evt)
        await q.put(evt)
        state["done"] = True
        await q.put(None)
        return

    # Final decision event
    if final_state:
        decision = final_state.get("final_decision") or {}
        st = _stats(start_time, emitted_fields)

        # Mark all remaining as completed
        for field in FIELD_AGENT_MAP:
            if prev_agent_statuses.get(field) != "completed":
                agent_name, stage = FIELD_AGENT_MAP[field]
                prev_agent_statuses[field] = "completed"
                evt = {
                    "type": "agent_update",
                    "agent": agent_name,
                    "stage": stage,
                    "status": "completed",
                    "stats": st,
                }
                _append_event(state, evt)
                await q.put(evt)

        evt = {
            "type": "decision",
            "stage": "decision",
            "signal": decision.get("action", "AVOID"),
            "decision_text": decision.get("narrative", ""),
            "master_score": final_state.get("master_score"),
            "adjusted_score": final_state.get("adjusted_score"),
            "position_role": final_state.get("position_role"),
            "final_decision": decision,
            "stats": st,
        }
        _append_event(state, evt)
        await q.put(evt)

    state["done"] = True
    await q.put(None)
|
||||
|
||||
|
||||
async def _update_in_progress(chunk, emitted, statuses, state, q, start_time):
    """Heuristic: mark agents as in_progress based on stage progression.

    Once a stage's gating fields have been emitted, every still-pending field
    of the next stage is flipped to "in_progress" and an agent_update event is
    both buffered and pushed to the SSE queue.

    Args:
        chunk: Latest graph state snapshot (unused directly; kept for
            signature parity with the caller).
        emitted: Set of state fields already seen with a non-None value.
        statuses: field -> status map ("pending"/"in_progress"/"completed"),
            mutated in place.
        state: Per-analysis bookkeeping dict holding the replay buffer.
        q: asyncio.Queue feeding the SSE stream.
        start_time: Analysis start timestamp for progress stats.
    """

    async def _mark(fields):
        # Shared emission loop — the original duplicated this body three
        # times verbatim, once per stage transition.
        for field in fields:
            if field not in emitted and statuses.get(field) == "pending":
                statuses[field] = "in_progress"
                agent_name, stage = FIELD_AGENT_MAP[field]
                evt = {
                    "type": "agent_update",
                    "agent": agent_name,
                    "stage": stage,
                    "status": "in_progress",
                    "stats": _stats(start_time, emitted),
                }
                _append_event(state, evt)
                await q.put(evt)

    # If validation is done, mark tier 1 as in_progress
    if "validation" in emitted:
        await _mark(("macro", "liquidity"))

    # If tier 1 done, mark tier 2 in_progress
    if "macro" in emitted and "liquidity" in emitted:
        await _mark((
            "business_quality", "institutional_flow", "valuation",
            "entry_timing", "earnings_revisions", "sector_rotation",
            "backlog", "crowding",
        ))

    # If scoring done, mark portfolio analysis in_progress
    if "master_score" in emitted:
        await _mark(("theme_substitution", "position_replacement"))
|
||||
|
||||
|
||||
def _stats(start_time: float, emitted_fields: set) -> dict:
    """Progress snapshot: completed/total agent counts plus elapsed seconds."""
    elapsed = round(time.time() - start_time, 1)
    return {
        "agents_done": len(emitted_fields),
        "agents_total": len(FIELD_AGENT_MAP),
        "elapsed": elapsed,
    }
|
||||
|
||||
|
||||
def _format_report(field: str, value) -> str:
|
||||
"""Format a state field value as a readable report string."""
|
||||
if isinstance(value, dict):
|
||||
if "summary_1_sentence" in value:
|
||||
return value["summary_1_sentence"]
|
||||
if "company_name" in value:
|
||||
return f"{value.get('company_name', '')} ({value.get('ticker', '')}) — {value.get('sector', '')} / {value.get('industry', '')}"
|
||||
return json.dumps(value, indent=2, default=str)[:500]
|
||||
return str(value)[:500]
|
||||
|
||||
|
||||
async def run_analysis(analysis_id: str, ticker: str, trade_date: str):
    """Background task with semaphore and timeout.

    Wraps _run_analysis_inner so that at most MAX_CONCURRENT analyses run at
    once and no single analysis runs longer than one hour. On timeout the
    analysis is marked done and the stream is terminated with an error event
    followed by the None sentinel.
    """
    state = analyses[analysis_id]
    q = state["queue"]
    async with _semaphore:
        try:
            await asyncio.wait_for(
                _run_analysis_inner(analysis_id, ticker, trade_date),
                timeout=3600,  # 60 minutes — matches the error message below
            )
        except asyncio.TimeoutError:
            logger.warning("analysis_timeout analysis_id=%s", analysis_id)
            evt = {"type": "error", "message": "Analysis timed out after 60 minutes"}
            _append_event(state, evt)
            await q.put(evt)
            state["done"] = True
            await q.put(None)  # sentinel: tells the SSE generator to stop
|
||||
|
||||
|
||||
# --- Cleanup ---
async def _cleanup_loop():
    """Periodically evict stale analyses from the in-memory registry.

    Every 5 minutes, drops any analysis created more than 30 minutes ago,
    regardless of completion state; clients reconnecting after that window
    receive a 404 from the stream endpoint.
    """
    while True:
        await asyncio.sleep(300)
        now = time.time()
        expired = [aid for aid, s in analyses.items() if now - s["created_at"] > 1800]
        for aid in expired:
            analyses.pop(aid, None)
        if expired:
            logger.info("cleanup_expired count=%d", len(expired))
|
||||
|
||||
|
||||
@app.on_event("startup")
async def _start_cleanup():
    """Launch the background cleanup loop once at server startup.

    NOTE(review): @app.on_event is deprecated in recent FastAPI in favor of
    lifespan handlers — consider migrating.
    """
    # Keep a strong reference: asyncio holds only weak references to tasks,
    # so a fire-and-forget task can be garbage-collected mid-run.
    app.state.cleanup_task = asyncio.create_task(_cleanup_loop())
|
||||
|
||||
|
||||
# --- Routes ---
|
||||
|
||||
@app.post("/analyze", dependencies=[Depends(verify_api_key)])
async def start_analysis(req: AnalyzeRequest):
    """Validate the ticker, register a new analysis, and launch it.

    Returns:
        dict with the analysis ``id`` (used to open the SSE stream), plus the
        normalized ticker and trade date.

    Raises:
        HTTPException(400): empty, over-long, or malformed ticker.
    """
    ticker = req.ticker.upper().strip()
    if not ticker:
        raise HTTPException(400, "Ticker must not be empty")
    if len(ticker) > 10:
        raise HTTPException(400, f"Ticker too long ({len(ticker)} chars, max 10)")
    if not re.match(r'^[A-Z0-9.\-]{1,10}$', ticker):
        raise HTTPException(400, "Invalid ticker — only letters, digits, dots, and hyphens allowed")
    trade_date = req.date or str(date.today())
    analysis_id = str(uuid.uuid4())
    analyses[analysis_id] = {
        "queue": asyncio.Queue(),
        "events": [],
        "done": False,
        "created_at": time.time(),
    }
    # Keep a strong reference to the task: asyncio holds only weak references,
    # so an unreferenced fire-and-forget task can be garbage-collected mid-run.
    analyses[analysis_id]["task"] = asyncio.create_task(
        run_analysis(analysis_id, ticker, trade_date)
    )
    return {"id": analysis_id, "ticker": ticker, "date": trade_date}
|
||||
|
||||
|
||||
@app.get("/analyze/{analysis_id}/stream", dependencies=[Depends(verify_api_key)])
async def stream_analysis(analysis_id: str, last_event: int = 0):
    """Stream SSE events. Supports reconnection via ?last_event=N.

    Events buffered so far are replayed first (starting after index N), then
    live events are consumed from the analysis queue until the None sentinel.
    SSE ``id`` fields are 1-based event indices, so a client can resume with
    ``?last_event=<last id received>``.
    """
    if analysis_id not in analyses:
        raise HTTPException(404, "Analysis not found")
    state = analyses[analysis_id]

    async def event_generator():
        # Phase 1: replay already-buffered events (reconnection catch-up).
        idx = last_event
        while idx < len(state["events"]):
            evt = state["events"][idx]
            idx += 1
            yield {"id": str(idx), "data": json.dumps(evt)}
        if state["done"]:
            return
        # Phase 2: stream live events from the queue.
        # NOTE(review): there is a single queue per analysis — two concurrent
        # stream consumers would split (not each receive) the live events, and
        # a reconnecting client may see an event both replayed from the buffer
        # and re-delivered from the queue. Confirm single-consumer usage.
        q = state["queue"]
        while True:
            try:
                event = await asyncio.wait_for(q.get(), timeout=15)
            except asyncio.TimeoutError:
                # Keep the connection alive through proxies/idle timeouts.
                yield {"event": "heartbeat", "data": json.dumps({"type": "heartbeat"})}
                continue
            if event is None:  # sentinel: analysis finished or errored
                break
            idx += 1
            yield {"id": str(idx), "data": json.dumps(event)}

    return EventSourceResponse(event_generator())
|
||||
|
||||
|
||||
@app.get("/health")
async def health():
    """Liveness probe — unauthenticated."""
    return {"status": "ok", "engine": "structured_pipeline"}
|
||||
|
||||
|
||||
@app.get("/api/status")
async def get_status():
    """Structured pipeline status — no auth required.

    Returns a summary of in-memory analyses plus basic process information.
    """
    # Anchor "uptime" at the first status call. The original computed
    # ``time.time() - os.getpid()``, which is meaningless.
    if not hasattr(get_status, "_started"):
        get_status._started = time.time()
    return {
        "service": "structured-pipeline",
        "engine": "TradingAgents",
        "active_analyses": len(analyses),
        # Registry entries are created with key "created_at" (see
        # start_analysis); the original read v["created"] and raised KeyError.
        "analyses": {
            k: {"created": v["created_at"], "done": v["done"]}
            for k, v in analyses.items()
        },
        "pid": os.getpid(),
        "uptime": round(time.time() - get_status._started, 1),
    }
|
||||
|
||||
|
||||
@app.get("/api/health")
async def api_health():
    """Liveness probe under the /api prefix — unauthenticated."""
    return {"status": "ok", "service": "structured-pipeline"}
|
||||
"""FastAPI SSE backend for the structured equity ranking engine."""
|
||||
|
||||
from pathlib import Path
|
||||
from dotenv import load_dotenv
|
||||
load_dotenv(Path(__file__).parent / ".env")
|
||||
|
||||
import logging
|
||||
import os
|
||||
import re
|
||||
import time
|
||||
import uuid
|
||||
import asyncio
|
||||
import json
|
||||
import traceback as _tb
|
||||
from datetime import date
|
||||
|
||||
from fastapi import FastAPI, HTTPException, Request, Depends
|
||||
|
||||
logging.basicConfig(
|
||||
level=logging.INFO,
|
||||
format="%(asctime)s %(levelname)s %(name)s %(message)s",
|
||||
)
|
||||
logger = logging.getLogger(__name__)
|
||||
from fastapi.middleware.cors import CORSMiddleware
|
||||
from pydantic import BaseModel
|
||||
from sse_starlette.sse import EventSourceResponse
|
||||
|
||||
# If using Groq (or other OpenAI-compatible), set OPENAI_API_KEY for langchain
|
||||
if not os.environ.get("OPENAI_API_KEY"):
|
||||
groq_key = os.environ.get("GROQ_API_KEY", "")
|
||||
if groq_key:
|
||||
os.environ["OPENAI_API_KEY"] = groq_key
|
||||
|
||||
from tradingagents.graph.trading_graph import TradingAgentsGraph
|
||||
from tradingagents.default_config import DEFAULT_CONFIG
|
||||
|
||||
app = FastAPI(title="TradingAgents Structured Pipeline")
|
||||
|
||||
# --- CORS ---
|
||||
_cors_env = os.getenv("CORS_ORIGINS", "")
|
||||
_cors_origins = [o.strip() for o in _cors_env.split(",") if o.strip()] if _cors_env else ["*"]
|
||||
app.add_middleware(
|
||||
CORSMiddleware,
|
||||
allow_origins=_cors_origins,
|
||||
allow_methods=["*"],
|
||||
allow_headers=["*"],
|
||||
)
|
||||
|
||||
# --- Auth ---
|
||||
_API_KEY = os.getenv("AGENTS_API_KEY", "")
|
||||
|
||||
|
||||
async def verify_api_key(request: Request):
|
||||
if not _API_KEY:
|
||||
return
|
||||
auth = request.headers.get("Authorization", "")
|
||||
if auth != f"Bearer {_API_KEY}":
|
||||
raise HTTPException(401, "Invalid or missing API key")
|
||||
|
||||
|
||||
# --- Concurrency ---
|
||||
MAX_CONCURRENT = int(os.getenv("MAX_CONCURRENT_ANALYSES", "3"))
|
||||
_semaphore = asyncio.Semaphore(MAX_CONCURRENT)
|
||||
|
||||
# --- Event buffer cap ---
|
||||
MAX_EVENTS_PER_ANALYSIS = 5000
|
||||
|
||||
analyses: dict[str, dict] = {}
|
||||
|
||||
|
||||
def _append_event(state: dict, evt: dict):
|
||||
"""Append an event to the analysis state, enforcing the buffer cap."""
|
||||
events = state["events"]
|
||||
events.append(evt)
|
||||
if len(events) > MAX_EVENTS_PER_ANALYSIS:
|
||||
# Drop oldest events, keep the last MAX_EVENTS_PER_ANALYSIS
|
||||
state["events"] = events[-MAX_EVENTS_PER_ANALYSIS:]
|
||||
|
||||
|
||||
class AnalyzeRequest(BaseModel):
|
||||
ticker: str
|
||||
date: str | None = None
|
||||
|
||||
|
||||
def build_config():
|
||||
"""Build TradingAgents config from env vars."""
|
||||
config = DEFAULT_CONFIG.copy()
|
||||
config["llm_provider"] = os.getenv("LLM_PROVIDER", "openai")
|
||||
config["deep_think_llm"] = os.getenv("DEEP_THINK_MODEL", "deepseek-v3.1:671b-cloud")
|
||||
config["quick_think_llm"] = os.getenv("QUICK_THINK_MODEL", "deepseek-v3.1:671b-cloud")
|
||||
config["backend_url"] = os.getenv("LLM_BASE_URL", "https://ollama.com/v1")
|
||||
config["max_debate_rounds"] = 1
|
||||
config["max_risk_discuss_rounds"] = 1
|
||||
config["data_vendors"] = {
|
||||
"core_stock_apis": "yfinance",
|
||||
"technical_indicators": "yfinance",
|
||||
"fundamental_data": "yfinance",
|
||||
"news_data": "yfinance",
|
||||
}
|
||||
logger.info(
|
||||
"config_built provider=%s deep=%s quick=%s url=%s",
|
||||
config['llm_provider'], config['deep_think_llm'],
|
||||
config['quick_think_llm'], config['backend_url'],
|
||||
)
|
||||
return config
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Stage/agent mapping for SSE events
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
# Maps state field → (agent display name, pipeline stage)
|
||||
FIELD_AGENT_MAP = {
|
||||
"validation": ("Validation", "validation"),
|
||||
"company_card": ("Company Card", "validation"),
|
||||
"macro": ("Macro Regime", "tier1"),
|
||||
"liquidity": ("Liquidity", "tier1"),
|
||||
"business_quality": ("Business Quality", "tier2"),
|
||||
"institutional_flow": ("Institutional Flow", "tier2"),
|
||||
"valuation": ("Valuation", "tier2"),
|
||||
"entry_timing": ("Entry Timing", "tier2"),
|
||||
"earnings_revisions": ("Earnings Revisions", "tier2"),
|
||||
"sector_rotation": ("Sector Rotation", "tier2"),
|
||||
"backlog": ("Backlog / Order Momentum", "tier2"),
|
||||
"crowding": ("Narrative Crowding", "tier2"),
|
||||
"archetype": ("Archetype", "scoring"),
|
||||
"master_score": ("Master Score", "scoring"),
|
||||
"theme_substitution": ("Theme Substitution", "portfolio"),
|
||||
"position_replacement": ("Position Replacement", "portfolio"),
|
||||
"bull_case": ("Bull Researcher", "debate"),
|
||||
"bear_case": ("Bear Researcher", "debate"),
|
||||
"debate": ("Debate Referee", "debate"),
|
||||
"risk": ("Risk / Invalidation", "decision"),
|
||||
"final_decision": ("Final Decision", "decision"),
|
||||
}
|
||||
|
||||
ALL_AGENTS = [name for name, _ in FIELD_AGENT_MAP.values()]
|
||||
ALL_STAGES = ["validation", "tier1", "tier2", "scoring", "portfolio", "debate", "decision"]
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Analysis runner
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
async def _run_analysis_inner(analysis_id: str, ticker: str, trade_date: str):
|
||||
"""Core analysis logic — streams structured pipeline state changes as SSE."""
|
||||
state = analyses[analysis_id]
|
||||
q = state["queue"]
|
||||
config = build_config()
|
||||
|
||||
try:
|
||||
graph = TradingAgentsGraph(debug=False, config=config)
|
||||
logger.info(
|
||||
"analysis_init_ok deep_llm=%s quick_llm=%s analysis_id=%s",
|
||||
type(graph.deep_thinking_llm).__name__,
|
||||
type(graph.quick_thinking_llm).__name__,
|
||||
analysis_id,
|
||||
)
|
||||
except Exception as e:
|
||||
logger.error("analysis_init_failed analysis_id=%s error=%s\n%s", analysis_id, e, _tb.format_exc())
|
||||
await q.put({"type": "error", "message": f"Init failed: {e}"})
|
||||
await q.put(None)
|
||||
return
|
||||
|
||||
init_state = graph._create_initial_state(ticker, trade_date)
|
||||
start_time = time.time()
|
||||
emitted_fields = set()
|
||||
prev_agent_statuses = {}
|
||||
final_state = None
|
||||
|
||||
# Emit initial status: all agents pending
|
||||
for field, (agent_name, stage) in FIELD_AGENT_MAP.items():
|
||||
prev_agent_statuses[field] = "pending"
|
||||
evt = {
|
||||
"type": "agent_update",
|
||||
"agent": agent_name,
|
||||
"stage": stage,
|
||||
"status": "pending",
|
||||
"stats": _stats(start_time, emitted_fields),
|
||||
}
|
||||
_append_event(state, evt)
|
||||
await q.put(evt)
|
||||
|
||||
try:
|
||||
async for chunk in graph.graph.astream(
|
||||
init_state,
|
||||
stream_mode="values",
|
||||
config={"recursion_limit": 25},
|
||||
):
|
||||
final_state = chunk
|
||||
|
||||
# Detect newly populated fields
|
||||
for field, (agent_name, stage) in FIELD_AGENT_MAP.items():
|
||||
if field in emitted_fields:
|
||||
continue
|
||||
|
||||
value = chunk.get(field)
|
||||
if value is None:
|
||||
continue
|
||||
|
||||
emitted_fields.add(field)
|
||||
st = _stats(start_time, emitted_fields)
|
||||
|
||||
# Mark this agent completed
|
||||
prev_agent_statuses[field] = "completed"
|
||||
evt = {
|
||||
"type": "agent_update",
|
||||
"agent": agent_name,
|
||||
"stage": stage,
|
||||
"status": "completed",
|
||||
"stats": st,
|
||||
}
|
||||
_append_event(state, evt)
|
||||
await q.put(evt)
|
||||
|
||||
# Emit report data for key fields
|
||||
if field in ("validation", "company_card"):
|
||||
evt = {
|
||||
"type": "report",
|
||||
"agent": agent_name,
|
||||
"stage": stage,
|
||||
"field": field,
|
||||
"report": _format_report(field, value),
|
||||
"stats": st,
|
||||
}
|
||||
_append_event(state, evt)
|
||||
await q.put(evt)
|
||||
|
||||
elif field == "debate":
|
||||
bull = chunk.get("bull_case") or {}
|
||||
bear = chunk.get("bear_case") or {}
|
||||
evt = {
|
||||
"type": "debate",
|
||||
"stage": "debate",
|
||||
"bull": bull.get("thesis", ""),
|
||||
"bear": bear.get("thesis", ""),
|
||||
"judge": (value or {}).get("reasoning", ""),
|
||||
"winner": (value or {}).get("winner", ""),
|
||||
"stats": st,
|
||||
}
|
||||
_append_event(state, evt)
|
||||
await q.put(evt)
|
||||
|
||||
elif field == "master_score":
|
||||
evt = {
|
||||
"type": "score",
|
||||
"stage": "scoring",
|
||||
"master_score": value,
|
||||
"adjusted_score": chunk.get("adjusted_score"),
|
||||
"position_role": chunk.get("position_role"),
|
||||
"stats": st,
|
||||
}
|
||||
_append_event(state, evt)
|
||||
await q.put(evt)
|
||||
|
||||
# Mark in-progress agents for upcoming stages
|
||||
await _update_in_progress(chunk, emitted_fields, prev_agent_statuses, state, q, start_time)
|
||||
|
||||
except Exception as e:
|
||||
logger.error("analysis_stream_error analysis_id=%s error=%s\n%s", analysis_id, e, _tb.format_exc())
|
||||
evt = {"type": "error", "message": str(e)}
|
||||
_append_event(state, evt)
|
||||
await q.put(evt)
|
||||
state["done"] = True
|
||||
await q.put(None)
|
||||
return
|
||||
|
||||
# Final decision event
|
||||
if final_state:
|
||||
decision = final_state.get("final_decision") or {}
|
||||
st = _stats(start_time, emitted_fields)
|
||||
|
||||
# Mark all remaining as completed
|
||||
for field in FIELD_AGENT_MAP:
|
||||
if prev_agent_statuses.get(field) != "completed":
|
||||
agent_name, stage = FIELD_AGENT_MAP[field]
|
||||
prev_agent_statuses[field] = "completed"
|
||||
evt = {
|
||||
"type": "agent_update",
|
||||
"agent": agent_name,
|
||||
"stage": stage,
|
||||
"status": "completed",
|
||||
"stats": st,
|
||||
}
|
||||
_append_event(state, evt)
|
||||
await q.put(evt)
|
||||
|
||||
evt = {
|
||||
"type": "decision",
|
||||
"stage": "decision",
|
||||
"signal": decision.get("action", "AVOID"),
|
||||
"decision_text": decision.get("narrative", ""),
|
||||
"master_score": final_state.get("master_score"),
|
||||
"adjusted_score": final_state.get("adjusted_score"),
|
||||
"position_role": final_state.get("position_role"),
|
||||
"final_decision": decision,
|
||||
"stats": st,
|
||||
}
|
||||
_append_event(state, evt)
|
||||
await q.put(evt)
|
||||
|
||||
state["done"] = True
|
||||
await q.put(None)
|
||||
|
||||
|
||||
async def _update_in_progress(chunk, emitted, statuses, state, q, start_time):
    """Heuristic: mark agents as in_progress based on stage progression.

    Once a stage's gating fields have been emitted, every still-pending field
    of the next stage is flipped to "in_progress" and an agent_update event is
    both buffered and pushed to the SSE queue.

    Args:
        chunk: Latest graph state snapshot (unused directly; kept for
            signature parity with the caller).
        emitted: Set of state fields already seen with a non-None value.
        statuses: field -> status map ("pending"/"in_progress"/"completed"),
            mutated in place.
        state: Per-analysis bookkeeping dict holding the replay buffer.
        q: asyncio.Queue feeding the SSE stream.
        start_time: Analysis start timestamp for progress stats.
    """

    async def _mark(fields):
        # Shared emission loop — the original duplicated this body three
        # times verbatim, once per stage transition.
        for field in fields:
            if field not in emitted and statuses.get(field) == "pending":
                statuses[field] = "in_progress"
                agent_name, stage = FIELD_AGENT_MAP[field]
                evt = {
                    "type": "agent_update",
                    "agent": agent_name,
                    "stage": stage,
                    "status": "in_progress",
                    "stats": _stats(start_time, emitted),
                }
                _append_event(state, evt)
                await q.put(evt)

    # If validation is done, mark tier 1 as in_progress
    if "validation" in emitted:
        await _mark(("macro", "liquidity"))

    # If tier 1 done, mark tier 2 in_progress
    if "macro" in emitted and "liquidity" in emitted:
        await _mark((
            "business_quality", "institutional_flow", "valuation",
            "entry_timing", "earnings_revisions", "sector_rotation",
            "backlog", "crowding",
        ))

    # If scoring done, mark portfolio analysis in_progress
    if "master_score" in emitted:
        await _mark(("theme_substitution", "position_replacement"))
|
||||
|
||||
|
||||
def _stats(start_time: float, emitted_fields: set) -> dict:
|
||||
return {
|
||||
"agents_done": len(emitted_fields),
|
||||
"agents_total": len(FIELD_AGENT_MAP),
|
||||
"elapsed": round(time.time() - start_time, 1),
|
||||
}
|
||||
|
||||
|
||||
def _format_report(field: str, value) -> str:
|
||||
"""Format a state field value as a readable report string."""
|
||||
if isinstance(value, dict):
|
||||
if "summary_1_sentence" in value:
|
||||
return value["summary_1_sentence"]
|
||||
if "company_name" in value:
|
||||
return f"{value.get('company_name', '')} ({value.get('ticker', '')}) — {value.get('sector', '')} / {value.get('industry', '')}"
|
||||
return json.dumps(value, indent=2, default=str)[:500]
|
||||
return str(value)[:500]
|
||||
|
||||
|
||||
async def run_analysis(analysis_id: str, ticker: str, trade_date: str):
|
||||
"""Background task with semaphore and timeout."""
|
||||
state = analyses[analysis_id]
|
||||
q = state["queue"]
|
||||
async with _semaphore:
|
||||
try:
|
||||
await asyncio.wait_for(
|
||||
_run_analysis_inner(analysis_id, ticker, trade_date),
|
||||
timeout=3600,
|
||||
)
|
||||
except asyncio.TimeoutError:
|
||||
logger.warning("analysis_timeout analysis_id=%s", analysis_id)
|
||||
evt = {"type": "error", "message": "Analysis timed out after 60 minutes"}
|
||||
_append_event(state, evt)
|
||||
await q.put(evt)
|
||||
state["done"] = True
|
||||
await q.put(None)
|
||||
|
||||
|
||||
# --- Cleanup ---
|
||||
async def _cleanup_loop():
|
||||
while True:
|
||||
await asyncio.sleep(300)
|
||||
now = time.time()
|
||||
expired = [aid for aid, s in analyses.items() if now - s["created_at"] > 1800]
|
||||
for aid in expired:
|
||||
analyses.pop(aid, None)
|
||||
if expired:
|
||||
logger.info("cleanup_expired count=%d", len(expired))
|
||||
|
||||
|
||||
@app.on_event("startup")
async def _start_cleanup():
    """Launch the background cleanup loop once at server startup.

    NOTE(review): @app.on_event is deprecated in recent FastAPI in favor of
    lifespan handlers — consider migrating.
    """
    # Keep a strong reference: asyncio holds only weak references to tasks,
    # so a fire-and-forget task can be garbage-collected mid-run.
    app.state.cleanup_task = asyncio.create_task(_cleanup_loop())
|
||||
|
||||
|
||||
# --- Routes ---
|
||||
|
||||
@app.post("/analyze", dependencies=[Depends(verify_api_key)])
async def start_analysis(req: AnalyzeRequest):
    """Validate the ticker, register a new analysis, and launch it.

    Returns:
        dict with the analysis ``id`` (used to open the SSE stream), plus the
        normalized ticker and trade date.

    Raises:
        HTTPException(400): empty, over-long, or malformed ticker.
    """
    ticker = req.ticker.upper().strip()
    if not ticker:
        raise HTTPException(400, "Ticker must not be empty")
    if len(ticker) > 10:
        raise HTTPException(400, f"Ticker too long ({len(ticker)} chars, max 10)")
    if not re.match(r'^[A-Z0-9.\-]{1,10}$', ticker):
        raise HTTPException(400, "Invalid ticker — only letters, digits, dots, and hyphens allowed")
    trade_date = req.date or str(date.today())
    analysis_id = str(uuid.uuid4())
    analyses[analysis_id] = {
        "queue": asyncio.Queue(),
        "events": [],
        "done": False,
        "created_at": time.time(),
    }
    # Keep a strong reference to the task: asyncio holds only weak references,
    # so an unreferenced fire-and-forget task can be garbage-collected mid-run.
    analyses[analysis_id]["task"] = asyncio.create_task(
        run_analysis(analysis_id, ticker, trade_date)
    )
    return {"id": analysis_id, "ticker": ticker, "date": trade_date}
|
||||
|
||||
|
||||
@app.get("/analyze/{analysis_id}/stream", dependencies=[Depends(verify_api_key)])
|
||||
async def stream_analysis(analysis_id: str, last_event: int = 0):
|
||||
"""Stream SSE events. Supports reconnection via ?last_event=N."""
|
||||
if analysis_id not in analyses:
|
||||
raise HTTPException(404, "Analysis not found")
|
||||
state = analyses[analysis_id]
|
||||
|
||||
async def event_generator():
|
||||
idx = last_event
|
||||
while idx < len(state["events"]):
|
||||
evt = state["events"][idx]
|
||||
idx += 1
|
||||
yield {"id": str(idx), "data": json.dumps(evt)}
|
||||
if state["done"]:
|
||||
return
|
||||
q = state["queue"]
|
||||
while True:
|
||||
try:
|
||||
event = await asyncio.wait_for(q.get(), timeout=15)
|
||||
except asyncio.TimeoutError:
|
||||
yield {"event": "heartbeat", "data": json.dumps({"type": "heartbeat"})}
|
||||
continue
|
||||
if event is None:
|
||||
break
|
||||
idx += 1
|
||||
yield {"id": str(idx), "data": json.dumps(event)}
|
||||
|
||||
return EventSourceResponse(event_generator())
|
||||
|
||||
|
||||
@app.get("/health")
|
||||
async def health():
|
||||
return {"status": "ok", "engine": "structured_pipeline"}
|
||||
|
||||
|
||||
@app.get("/api/status")
async def get_status():
    """Structured pipeline status — no auth required.

    Returns a summary of in-memory analyses plus basic process information.
    """
    # Anchor "uptime" at the first status call. The original computed
    # ``time.time() - os.getpid()``, which is meaningless.
    if not hasattr(get_status, "_started"):
        get_status._started = time.time()
    return {
        "service": "structured-pipeline",
        "engine": "TradingAgents",
        "active_analyses": len(analyses),
        # Registry entries are created with key "created_at" (see
        # start_analysis); the original read v["created"] and raised KeyError.
        "analyses": {
            k: {"created": v["created_at"], "done": v["done"]}
            for k, v in analyses.items()
        },
        "pid": os.getpid(),
        "uptime": round(time.time() - get_status._started, 1),
    }
|
||||
|
||||
|
||||
@app.get("/api/health")
|
||||
async def api_health():
|
||||
return {"status": "ok", "service": "structured-pipeline"}
|
||||
|
|
|
|||
|
|
@ -1,51 +1,51 @@
|
|||
import getpass
|
||||
import requests
|
||||
from rich.console import Console
|
||||
from rich.panel import Panel
|
||||
|
||||
from cli.config import CLI_CONFIG
|
||||
|
||||
|
||||
def fetch_announcements(url: str = None, timeout: float = None) -> dict:
|
||||
"""Fetch announcements from endpoint. Returns dict with announcements and settings."""
|
||||
endpoint = url or CLI_CONFIG["announcements_url"]
|
||||
timeout = timeout or CLI_CONFIG["announcements_timeout"]
|
||||
fallback = CLI_CONFIG["announcements_fallback"]
|
||||
|
||||
try:
|
||||
response = requests.get(endpoint, timeout=timeout)
|
||||
response.raise_for_status()
|
||||
data = response.json()
|
||||
return {
|
||||
"announcements": data.get("announcements", [fallback]),
|
||||
"require_attention": data.get("require_attention", False),
|
||||
}
|
||||
except Exception:
|
||||
return {
|
||||
"announcements": [fallback],
|
||||
"require_attention": False,
|
||||
}
|
||||
|
||||
|
||||
def display_announcements(console: Console, data: dict) -> None:
|
||||
"""Display announcements panel. Prompts for Enter if require_attention is True."""
|
||||
announcements = data.get("announcements", [])
|
||||
require_attention = data.get("require_attention", False)
|
||||
|
||||
if not announcements:
|
||||
return
|
||||
|
||||
content = "\n".join(announcements)
|
||||
|
||||
panel = Panel(
|
||||
content,
|
||||
border_style="cyan",
|
||||
padding=(1, 2),
|
||||
title="Announcements",
|
||||
)
|
||||
console.print(panel)
|
||||
|
||||
if require_attention:
|
||||
getpass.getpass("Press Enter to continue...")
|
||||
else:
|
||||
console.print()
|
||||
import getpass
|
||||
import requests
|
||||
from rich.console import Console
|
||||
from rich.panel import Panel
|
||||
|
||||
from cli.config import CLI_CONFIG
|
||||
|
||||
|
||||
def fetch_announcements(url: str = None, timeout: float = None) -> dict:
|
||||
"""Fetch announcements from endpoint. Returns dict with announcements and settings."""
|
||||
endpoint = url or CLI_CONFIG["announcements_url"]
|
||||
timeout = timeout or CLI_CONFIG["announcements_timeout"]
|
||||
fallback = CLI_CONFIG["announcements_fallback"]
|
||||
|
||||
try:
|
||||
response = requests.get(endpoint, timeout=timeout)
|
||||
response.raise_for_status()
|
||||
data = response.json()
|
||||
return {
|
||||
"announcements": data.get("announcements", [fallback]),
|
||||
"require_attention": data.get("require_attention", False),
|
||||
}
|
||||
except Exception:
|
||||
return {
|
||||
"announcements": [fallback],
|
||||
"require_attention": False,
|
||||
}
|
||||
|
||||
|
||||
def display_announcements(console: Console, data: dict) -> None:
|
||||
"""Display announcements panel. Prompts for Enter if require_attention is True."""
|
||||
announcements = data.get("announcements", [])
|
||||
require_attention = data.get("require_attention", False)
|
||||
|
||||
if not announcements:
|
||||
return
|
||||
|
||||
content = "\n".join(announcements)
|
||||
|
||||
panel = Panel(
|
||||
content,
|
||||
border_style="cyan",
|
||||
padding=(1, 2),
|
||||
title="Announcements",
|
||||
)
|
||||
console.print(panel)
|
||||
|
||||
if require_attention:
|
||||
getpass.getpass("Press Enter to continue...")
|
||||
else:
|
||||
console.print()
|
||||
|
|
|
|||
|
|
@ -1,6 +1,6 @@
|
|||
CLI_CONFIG = {
|
||||
# Announcements
|
||||
"announcements_url": "https://api.tauric.ai/v1/announcements",
|
||||
"announcements_timeout": 1.0,
|
||||
"announcements_fallback": "[cyan]For more information, please visit[/cyan] [link=https://github.com/TauricResearch]https://github.com/TauricResearch[/link]",
|
||||
}
|
||||
CLI_CONFIG = {
|
||||
# Announcements
|
||||
"announcements_url": "https://api.tauric.ai/v1/announcements",
|
||||
"announcements_timeout": 1.0,
|
||||
"announcements_fallback": "[cyan]For more information, please visit[/cyan] [link=https://github.com/TauricResearch]https://github.com/TauricResearch[/link]",
|
||||
}
|
||||
|
|
|
|||
2354
cli/main.py
2354
cli/main.py
File diff suppressed because it is too large
Load Diff
|
|
@ -1,10 +1,10 @@
|
|||
from enum import Enum
|
||||
from typing import List, Optional, Dict
|
||||
from pydantic import BaseModel
|
||||
|
||||
|
||||
class AnalystType(str, Enum):
|
||||
MARKET = "market"
|
||||
SOCIAL = "social"
|
||||
NEWS = "news"
|
||||
FUNDAMENTALS = "fundamentals"
|
||||
from enum import Enum
|
||||
from typing import List, Optional, Dict
|
||||
from pydantic import BaseModel
|
||||
|
||||
|
||||
class AnalystType(str, Enum):
|
||||
MARKET = "market"
|
||||
SOCIAL = "social"
|
||||
NEWS = "news"
|
||||
FUNDAMENTALS = "fundamentals"
|
||||
|
|
|
|||
|
|
@ -1,7 +1,7 @@
|
|||
|
||||
______ ___ ___ __
|
||||
/_ __/________ _____/ (_)___ ____ _/ | ____ ____ ____ / /______
|
||||
/ / / ___/ __ `/ __ / / __ \/ __ `/ /| |/ __ `/ _ \/ __ \/ __/ ___/
|
||||
/ / / / / /_/ / /_/ / / / / / /_/ / ___ / /_/ / __/ / / / /_(__ )
|
||||
/_/ /_/ \__,_/\__,_/_/_/ /_/\__, /_/ |_\__, /\___/_/ /_/\__/____/
|
||||
/____/ /____/
|
||||
|
||||
______ ___ ___ __
|
||||
/_ __/________ _____/ (_)___ ____ _/ | ____ ____ ____ / /______
|
||||
/ / / ___/ __ `/ __ / / __ \/ __ `/ /| |/ __ `/ _ \/ __ \/ __/ ___/
|
||||
/ / / / / /_/ / /_/ / / / / / /_/ / ___ / /_/ / __/ / / / /_(__ )
|
||||
/_/ /_/ \__,_/\__,_/_/_/ /_/\__, /_/ |_\__, /\___/_/ /_/\__/____/
|
||||
/____/ /____/
|
||||
|
|
|
|||
|
|
@ -1,76 +1,76 @@
|
|||
import threading
|
||||
from typing import Any, Dict, List, Union
|
||||
|
||||
from langchain_core.callbacks import BaseCallbackHandler
|
||||
from langchain_core.outputs import LLMResult
|
||||
from langchain_core.messages import AIMessage
|
||||
|
||||
|
||||
class StatsCallbackHandler(BaseCallbackHandler):
|
||||
"""Callback handler that tracks LLM calls, tool calls, and token usage."""
|
||||
|
||||
def __init__(self) -> None:
|
||||
super().__init__()
|
||||
self._lock = threading.Lock()
|
||||
self.llm_calls = 0
|
||||
self.tool_calls = 0
|
||||
self.tokens_in = 0
|
||||
self.tokens_out = 0
|
||||
|
||||
def on_llm_start(
|
||||
self,
|
||||
serialized: Dict[str, Any],
|
||||
prompts: List[str],
|
||||
**kwargs: Any,
|
||||
) -> None:
|
||||
"""Increment LLM call counter when an LLM starts."""
|
||||
with self._lock:
|
||||
self.llm_calls += 1
|
||||
|
||||
def on_chat_model_start(
|
||||
self,
|
||||
serialized: Dict[str, Any],
|
||||
messages: List[List[Any]],
|
||||
**kwargs: Any,
|
||||
) -> None:
|
||||
"""Increment LLM call counter when a chat model starts."""
|
||||
with self._lock:
|
||||
self.llm_calls += 1
|
||||
|
||||
def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
|
||||
"""Extract token usage from LLM response."""
|
||||
try:
|
||||
generation = response.generations[0][0]
|
||||
except (IndexError, TypeError):
|
||||
return
|
||||
|
||||
usage_metadata = None
|
||||
if hasattr(generation, "message"):
|
||||
message = generation.message
|
||||
if isinstance(message, AIMessage) and hasattr(message, "usage_metadata"):
|
||||
usage_metadata = message.usage_metadata
|
||||
|
||||
if usage_metadata:
|
||||
with self._lock:
|
||||
self.tokens_in += usage_metadata.get("input_tokens", 0)
|
||||
self.tokens_out += usage_metadata.get("output_tokens", 0)
|
||||
|
||||
def on_tool_start(
|
||||
self,
|
||||
serialized: Dict[str, Any],
|
||||
input_str: str,
|
||||
**kwargs: Any,
|
||||
) -> None:
|
||||
"""Increment tool call counter when a tool starts."""
|
||||
with self._lock:
|
||||
self.tool_calls += 1
|
||||
|
||||
def get_stats(self) -> Dict[str, Any]:
|
||||
"""Return current statistics."""
|
||||
with self._lock:
|
||||
return {
|
||||
"llm_calls": self.llm_calls,
|
||||
"tool_calls": self.tool_calls,
|
||||
"tokens_in": self.tokens_in,
|
||||
"tokens_out": self.tokens_out,
|
||||
}
|
||||
import threading
|
||||
from typing import Any, Dict, List, Union
|
||||
|
||||
from langchain_core.callbacks import BaseCallbackHandler
|
||||
from langchain_core.outputs import LLMResult
|
||||
from langchain_core.messages import AIMessage
|
||||
|
||||
|
||||
class StatsCallbackHandler(BaseCallbackHandler):
|
||||
"""Callback handler that tracks LLM calls, tool calls, and token usage."""
|
||||
|
||||
def __init__(self) -> None:
|
||||
super().__init__()
|
||||
self._lock = threading.Lock()
|
||||
self.llm_calls = 0
|
||||
self.tool_calls = 0
|
||||
self.tokens_in = 0
|
||||
self.tokens_out = 0
|
||||
|
||||
def on_llm_start(
|
||||
self,
|
||||
serialized: Dict[str, Any],
|
||||
prompts: List[str],
|
||||
**kwargs: Any,
|
||||
) -> None:
|
||||
"""Increment LLM call counter when an LLM starts."""
|
||||
with self._lock:
|
||||
self.llm_calls += 1
|
||||
|
||||
def on_chat_model_start(
|
||||
self,
|
||||
serialized: Dict[str, Any],
|
||||
messages: List[List[Any]],
|
||||
**kwargs: Any,
|
||||
) -> None:
|
||||
"""Increment LLM call counter when a chat model starts."""
|
||||
with self._lock:
|
||||
self.llm_calls += 1
|
||||
|
||||
def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
|
||||
"""Extract token usage from LLM response."""
|
||||
try:
|
||||
generation = response.generations[0][0]
|
||||
except (IndexError, TypeError):
|
||||
return
|
||||
|
||||
usage_metadata = None
|
||||
if hasattr(generation, "message"):
|
||||
message = generation.message
|
||||
if isinstance(message, AIMessage) and hasattr(message, "usage_metadata"):
|
||||
usage_metadata = message.usage_metadata
|
||||
|
||||
if usage_metadata:
|
||||
with self._lock:
|
||||
self.tokens_in += usage_metadata.get("input_tokens", 0)
|
||||
self.tokens_out += usage_metadata.get("output_tokens", 0)
|
||||
|
||||
def on_tool_start(
|
||||
self,
|
||||
serialized: Dict[str, Any],
|
||||
input_str: str,
|
||||
**kwargs: Any,
|
||||
) -> None:
|
||||
"""Increment tool call counter when a tool starts."""
|
||||
with self._lock:
|
||||
self.tool_calls += 1
|
||||
|
||||
def get_stats(self) -> Dict[str, Any]:
|
||||
"""Return current statistics."""
|
||||
with self._lock:
|
||||
return {
|
||||
"llm_calls": self.llm_calls,
|
||||
"tool_calls": self.tool_calls,
|
||||
"tokens_in": self.tokens_in,
|
||||
"tokens_out": self.tokens_out,
|
||||
}
|
||||
|
|
|
|||
656
cli/utils.py
656
cli/utils.py
|
|
@ -1,328 +1,328 @@
|
|||
import questionary
|
||||
from typing import List, Optional, Tuple, Dict
|
||||
|
||||
from cli.models import AnalystType
|
||||
|
||||
ANALYST_ORDER = [
|
||||
("Market Analyst", AnalystType.MARKET),
|
||||
("Social Media Analyst", AnalystType.SOCIAL),
|
||||
("News Analyst", AnalystType.NEWS),
|
||||
("Fundamentals Analyst", AnalystType.FUNDAMENTALS),
|
||||
]
|
||||
|
||||
|
||||
def get_ticker() -> str:
|
||||
"""Prompt the user to enter a ticker symbol."""
|
||||
ticker = questionary.text(
|
||||
"Enter the ticker symbol to analyze:",
|
||||
validate=lambda x: len(x.strip()) > 0 or "Please enter a valid ticker symbol.",
|
||||
style=questionary.Style(
|
||||
[
|
||||
("text", "fg:green"),
|
||||
("highlighted", "noinherit"),
|
||||
]
|
||||
),
|
||||
).ask()
|
||||
|
||||
if not ticker:
|
||||
console.print("\n[red]No ticker symbol provided. Exiting...[/red]")
|
||||
exit(1)
|
||||
|
||||
return ticker.strip().upper()
|
||||
|
||||
|
||||
def get_analysis_date() -> str:
|
||||
"""Prompt the user to enter a date in YYYY-MM-DD format."""
|
||||
import re
|
||||
from datetime import datetime
|
||||
|
||||
def validate_date(date_str: str) -> bool:
|
||||
if not re.match(r"^\d{4}-\d{2}-\d{2}$", date_str):
|
||||
return False
|
||||
try:
|
||||
datetime.strptime(date_str, "%Y-%m-%d")
|
||||
return True
|
||||
except ValueError:
|
||||
return False
|
||||
|
||||
date = questionary.text(
|
||||
"Enter the analysis date (YYYY-MM-DD):",
|
||||
validate=lambda x: validate_date(x.strip())
|
||||
or "Please enter a valid date in YYYY-MM-DD format.",
|
||||
style=questionary.Style(
|
||||
[
|
||||
("text", "fg:green"),
|
||||
("highlighted", "noinherit"),
|
||||
]
|
||||
),
|
||||
).ask()
|
||||
|
||||
if not date:
|
||||
console.print("\n[red]No date provided. Exiting...[/red]")
|
||||
exit(1)
|
||||
|
||||
return date.strip()
|
||||
|
||||
|
||||
def select_analysts() -> List[AnalystType]:
|
||||
"""Select analysts using an interactive checkbox."""
|
||||
choices = questionary.checkbox(
|
||||
"Select Your [Analysts Team]:",
|
||||
choices=[
|
||||
questionary.Choice(display, value=value) for display, value in ANALYST_ORDER
|
||||
],
|
||||
instruction="\n- Press Space to select/unselect analysts\n- Press 'a' to select/unselect all\n- Press Enter when done",
|
||||
validate=lambda x: len(x) > 0 or "You must select at least one analyst.",
|
||||
style=questionary.Style(
|
||||
[
|
||||
("checkbox-selected", "fg:green"),
|
||||
("selected", "fg:green noinherit"),
|
||||
("highlighted", "noinherit"),
|
||||
("pointer", "noinherit"),
|
||||
]
|
||||
),
|
||||
).ask()
|
||||
|
||||
if not choices:
|
||||
console.print("\n[red]No analysts selected. Exiting...[/red]")
|
||||
exit(1)
|
||||
|
||||
return choices
|
||||
|
||||
|
||||
def select_research_depth() -> int:
|
||||
"""Select research depth using an interactive selection."""
|
||||
|
||||
# Define research depth options with their corresponding values
|
||||
DEPTH_OPTIONS = [
|
||||
("Shallow - Quick research, few debate and strategy discussion rounds", 1),
|
||||
("Medium - Middle ground, moderate debate rounds and strategy discussion", 3),
|
||||
("Deep - Comprehensive research, in depth debate and strategy discussion", 5),
|
||||
]
|
||||
|
||||
choice = questionary.select(
|
||||
"Select Your [Research Depth]:",
|
||||
choices=[
|
||||
questionary.Choice(display, value=value) for display, value in DEPTH_OPTIONS
|
||||
],
|
||||
instruction="\n- Use arrow keys to navigate\n- Press Enter to select",
|
||||
style=questionary.Style(
|
||||
[
|
||||
("selected", "fg:yellow noinherit"),
|
||||
("highlighted", "fg:yellow noinherit"),
|
||||
("pointer", "fg:yellow noinherit"),
|
||||
]
|
||||
),
|
||||
).ask()
|
||||
|
||||
if choice is None:
|
||||
console.print("\n[red]No research depth selected. Exiting...[/red]")
|
||||
exit(1)
|
||||
|
||||
return choice
|
||||
|
||||
|
||||
def select_shallow_thinking_agent(provider) -> str:
|
||||
"""Select shallow thinking llm engine using an interactive selection."""
|
||||
|
||||
# Define shallow thinking llm engine options with their corresponding model names
|
||||
SHALLOW_AGENT_OPTIONS = {
|
||||
"openai": [
|
||||
("GPT-5 Mini - Cost-optimized reasoning", "gpt-5-mini"),
|
||||
("GPT-5 Nano - Ultra-fast, high-throughput", "gpt-5-nano"),
|
||||
("GPT-5.2 - Latest flagship", "gpt-5.2"),
|
||||
("GPT-5.1 - Flexible reasoning", "gpt-5.1"),
|
||||
("GPT-4.1 - Smartest non-reasoning, 1M context", "gpt-4.1"),
|
||||
],
|
||||
"anthropic": [
|
||||
("Claude Haiku 4.5 - Fast + extended thinking", "claude-haiku-4-5"),
|
||||
("Claude Sonnet 4.5 - Best for agents/coding", "claude-sonnet-4-5"),
|
||||
("Claude Sonnet 4 - High-performance", "claude-sonnet-4-20250514"),
|
||||
],
|
||||
"google": [
|
||||
("Gemini 3 Flash - Next-gen fast", "gemini-3-flash-preview"),
|
||||
("Gemini 2.5 Flash - Balanced, recommended", "gemini-2.5-flash"),
|
||||
("Gemini 3 Pro - Reasoning-first", "gemini-3-pro-preview"),
|
||||
("Gemini 2.5 Flash Lite - Fast, low-cost", "gemini-2.5-flash-lite"),
|
||||
],
|
||||
"xai": [
|
||||
("Grok 4.1 Fast (Non-Reasoning) - Speed optimized, 2M ctx", "grok-4-1-fast-non-reasoning"),
|
||||
("Grok 4 Fast (Non-Reasoning) - Speed optimized", "grok-4-fast-non-reasoning"),
|
||||
("Grok 4.1 Fast (Reasoning) - High-performance, 2M ctx", "grok-4-1-fast-reasoning"),
|
||||
("Grok 4 Fast (Reasoning) - High-performance", "grok-4-fast-reasoning"),
|
||||
],
|
||||
"openrouter": [
|
||||
("NVIDIA Nemotron 3 Nano 30B (free)", "nvidia/nemotron-3-nano-30b-a3b:free"),
|
||||
("Z.AI GLM 4.5 Air (free)", "z-ai/glm-4.5-air:free"),
|
||||
],
|
||||
"ollama": [
|
||||
("Qwen3:latest (8B, local)", "qwen3:latest"),
|
||||
("GPT-OSS:latest (20B, local)", "gpt-oss:latest"),
|
||||
("GLM-4.7-Flash:latest (30B, local)", "glm-4.7-flash:latest"),
|
||||
],
|
||||
}
|
||||
|
||||
choice = questionary.select(
|
||||
"Select Your [Quick-Thinking LLM Engine]:",
|
||||
choices=[
|
||||
questionary.Choice(display, value=value)
|
||||
for display, value in SHALLOW_AGENT_OPTIONS[provider.lower()]
|
||||
],
|
||||
instruction="\n- Use arrow keys to navigate\n- Press Enter to select",
|
||||
style=questionary.Style(
|
||||
[
|
||||
("selected", "fg:magenta noinherit"),
|
||||
("highlighted", "fg:magenta noinherit"),
|
||||
("pointer", "fg:magenta noinherit"),
|
||||
]
|
||||
),
|
||||
).ask()
|
||||
|
||||
if choice is None:
|
||||
console.print(
|
||||
"\n[red]No shallow thinking llm engine selected. Exiting...[/red]"
|
||||
)
|
||||
exit(1)
|
||||
|
||||
return choice
|
||||
|
||||
|
||||
def select_deep_thinking_agent(provider) -> str:
|
||||
"""Select deep thinking llm engine using an interactive selection."""
|
||||
|
||||
# Define deep thinking llm engine options with their corresponding model names
|
||||
DEEP_AGENT_OPTIONS = {
|
||||
"openai": [
|
||||
("GPT-5.2 - Latest flagship", "gpt-5.2"),
|
||||
("GPT-5.1 - Flexible reasoning", "gpt-5.1"),
|
||||
("GPT-5 - Advanced reasoning", "gpt-5"),
|
||||
("GPT-4.1 - Smartest non-reasoning, 1M context", "gpt-4.1"),
|
||||
("GPT-5 Mini - Cost-optimized reasoning", "gpt-5-mini"),
|
||||
("GPT-5 Nano - Ultra-fast, high-throughput", "gpt-5-nano"),
|
||||
],
|
||||
"anthropic": [
|
||||
("Claude Sonnet 4.5 - Best for agents/coding", "claude-sonnet-4-5"),
|
||||
("Claude Opus 4.5 - Premium, max intelligence", "claude-opus-4-5"),
|
||||
("Claude Opus 4.1 - Most capable model", "claude-opus-4-1-20250805"),
|
||||
("Claude Haiku 4.5 - Fast + extended thinking", "claude-haiku-4-5"),
|
||||
("Claude Sonnet 4 - High-performance", "claude-sonnet-4-20250514"),
|
||||
],
|
||||
"google": [
|
||||
("Gemini 3 Pro - Reasoning-first", "gemini-3-pro-preview"),
|
||||
("Gemini 3 Flash - Next-gen fast", "gemini-3-flash-preview"),
|
||||
("Gemini 2.5 Flash - Balanced, recommended", "gemini-2.5-flash"),
|
||||
],
|
||||
"xai": [
|
||||
("Grok 4.1 Fast (Reasoning) - High-performance, 2M ctx", "grok-4-1-fast-reasoning"),
|
||||
("Grok 4 Fast (Reasoning) - High-performance", "grok-4-fast-reasoning"),
|
||||
("Grok 4 - Flagship model", "grok-4-0709"),
|
||||
("Grok 4.1 Fast (Non-Reasoning) - Speed optimized, 2M ctx", "grok-4-1-fast-non-reasoning"),
|
||||
("Grok 4 Fast (Non-Reasoning) - Speed optimized", "grok-4-fast-non-reasoning"),
|
||||
],
|
||||
"openrouter": [
|
||||
("Z.AI GLM 4.5 Air (free)", "z-ai/glm-4.5-air:free"),
|
||||
("NVIDIA Nemotron 3 Nano 30B (free)", "nvidia/nemotron-3-nano-30b-a3b:free"),
|
||||
],
|
||||
"ollama": [
|
||||
("GLM-4.7-Flash:latest (30B, local)", "glm-4.7-flash:latest"),
|
||||
("GPT-OSS:latest (20B, local)", "gpt-oss:latest"),
|
||||
("Qwen3:latest (8B, local)", "qwen3:latest"),
|
||||
],
|
||||
}
|
||||
|
||||
choice = questionary.select(
|
||||
"Select Your [Deep-Thinking LLM Engine]:",
|
||||
choices=[
|
||||
questionary.Choice(display, value=value)
|
||||
for display, value in DEEP_AGENT_OPTIONS[provider.lower()]
|
||||
],
|
||||
instruction="\n- Use arrow keys to navigate\n- Press Enter to select",
|
||||
style=questionary.Style(
|
||||
[
|
||||
("selected", "fg:magenta noinherit"),
|
||||
("highlighted", "fg:magenta noinherit"),
|
||||
("pointer", "fg:magenta noinherit"),
|
||||
]
|
||||
),
|
||||
).ask()
|
||||
|
||||
if choice is None:
|
||||
console.print("\n[red]No deep thinking llm engine selected. Exiting...[/red]")
|
||||
exit(1)
|
||||
|
||||
return choice
|
||||
|
||||
def select_llm_provider() -> tuple[str, str]:
|
||||
"""Select the OpenAI api url using interactive selection."""
|
||||
# Define OpenAI api options with their corresponding endpoints
|
||||
BASE_URLS = [
|
||||
("OpenAI", "https://api.openai.com/v1"),
|
||||
("Google", "https://generativelanguage.googleapis.com/v1"),
|
||||
("Anthropic", "https://api.anthropic.com/"),
|
||||
("xAI", "https://api.x.ai/v1"),
|
||||
("Openrouter", "https://openrouter.ai/api/v1"),
|
||||
("Ollama", "http://localhost:11434/v1"),
|
||||
]
|
||||
|
||||
choice = questionary.select(
|
||||
"Select your LLM Provider:",
|
||||
choices=[
|
||||
questionary.Choice(display, value=(display, value))
|
||||
for display, value in BASE_URLS
|
||||
],
|
||||
instruction="\n- Use arrow keys to navigate\n- Press Enter to select",
|
||||
style=questionary.Style(
|
||||
[
|
||||
("selected", "fg:magenta noinherit"),
|
||||
("highlighted", "fg:magenta noinherit"),
|
||||
("pointer", "fg:magenta noinherit"),
|
||||
]
|
||||
),
|
||||
).ask()
|
||||
|
||||
if choice is None:
|
||||
console.print("\n[red]no OpenAI backend selected. Exiting...[/red]")
|
||||
exit(1)
|
||||
|
||||
display_name, url = choice
|
||||
print(f"You selected: {display_name}\tURL: {url}")
|
||||
|
||||
return display_name, url
|
||||
|
||||
|
||||
def ask_openai_reasoning_effort() -> str:
|
||||
"""Ask for OpenAI reasoning effort level."""
|
||||
choices = [
|
||||
questionary.Choice("Medium (Default)", "medium"),
|
||||
questionary.Choice("High (More thorough)", "high"),
|
||||
questionary.Choice("Low (Faster)", "low"),
|
||||
]
|
||||
return questionary.select(
|
||||
"Select Reasoning Effort:",
|
||||
choices=choices,
|
||||
style=questionary.Style([
|
||||
("selected", "fg:cyan noinherit"),
|
||||
("highlighted", "fg:cyan noinherit"),
|
||||
("pointer", "fg:cyan noinherit"),
|
||||
]),
|
||||
).ask()
|
||||
|
||||
|
||||
def ask_gemini_thinking_config() -> str | None:
|
||||
"""Ask for Gemini thinking configuration.
|
||||
|
||||
Returns thinking_level: "high" or "minimal".
|
||||
Client maps to appropriate API param based on model series.
|
||||
"""
|
||||
return questionary.select(
|
||||
"Select Thinking Mode:",
|
||||
choices=[
|
||||
questionary.Choice("Enable Thinking (recommended)", "high"),
|
||||
questionary.Choice("Minimal/Disable Thinking", "minimal"),
|
||||
],
|
||||
style=questionary.Style([
|
||||
("selected", "fg:green noinherit"),
|
||||
("highlighted", "fg:green noinherit"),
|
||||
("pointer", "fg:green noinherit"),
|
||||
]),
|
||||
).ask()
|
||||
import questionary
|
||||
from typing import List, Optional, Tuple, Dict
|
||||
|
||||
from cli.models import AnalystType
|
||||
|
||||
ANALYST_ORDER = [
|
||||
("Market Analyst", AnalystType.MARKET),
|
||||
("Social Media Analyst", AnalystType.SOCIAL),
|
||||
("News Analyst", AnalystType.NEWS),
|
||||
("Fundamentals Analyst", AnalystType.FUNDAMENTALS),
|
||||
]
|
||||
|
||||
|
||||
def get_ticker() -> str:
|
||||
"""Prompt the user to enter a ticker symbol."""
|
||||
ticker = questionary.text(
|
||||
"Enter the ticker symbol to analyze:",
|
||||
validate=lambda x: len(x.strip()) > 0 or "Please enter a valid ticker symbol.",
|
||||
style=questionary.Style(
|
||||
[
|
||||
("text", "fg:green"),
|
||||
("highlighted", "noinherit"),
|
||||
]
|
||||
),
|
||||
).ask()
|
||||
|
||||
if not ticker:
|
||||
console.print("\n[red]No ticker symbol provided. Exiting...[/red]")
|
||||
exit(1)
|
||||
|
||||
return ticker.strip().upper()
|
||||
|
||||
|
||||
def get_analysis_date() -> str:
|
||||
"""Prompt the user to enter a date in YYYY-MM-DD format."""
|
||||
import re
|
||||
from datetime import datetime
|
||||
|
||||
def validate_date(date_str: str) -> bool:
|
||||
if not re.match(r"^\d{4}-\d{2}-\d{2}$", date_str):
|
||||
return False
|
||||
try:
|
||||
datetime.strptime(date_str, "%Y-%m-%d")
|
||||
return True
|
||||
except ValueError:
|
||||
return False
|
||||
|
||||
date = questionary.text(
|
||||
"Enter the analysis date (YYYY-MM-DD):",
|
||||
validate=lambda x: validate_date(x.strip())
|
||||
or "Please enter a valid date in YYYY-MM-DD format.",
|
||||
style=questionary.Style(
|
||||
[
|
||||
("text", "fg:green"),
|
||||
("highlighted", "noinherit"),
|
||||
]
|
||||
),
|
||||
).ask()
|
||||
|
||||
if not date:
|
||||
console.print("\n[red]No date provided. Exiting...[/red]")
|
||||
exit(1)
|
||||
|
||||
return date.strip()
|
||||
|
||||
|
||||
def select_analysts() -> List[AnalystType]:
|
||||
"""Select analysts using an interactive checkbox."""
|
||||
choices = questionary.checkbox(
|
||||
"Select Your [Analysts Team]:",
|
||||
choices=[
|
||||
questionary.Choice(display, value=value) for display, value in ANALYST_ORDER
|
||||
],
|
||||
instruction="\n- Press Space to select/unselect analysts\n- Press 'a' to select/unselect all\n- Press Enter when done",
|
||||
validate=lambda x: len(x) > 0 or "You must select at least one analyst.",
|
||||
style=questionary.Style(
|
||||
[
|
||||
("checkbox-selected", "fg:green"),
|
||||
("selected", "fg:green noinherit"),
|
||||
("highlighted", "noinherit"),
|
||||
("pointer", "noinherit"),
|
||||
]
|
||||
),
|
||||
).ask()
|
||||
|
||||
if not choices:
|
||||
console.print("\n[red]No analysts selected. Exiting...[/red]")
|
||||
exit(1)
|
||||
|
||||
return choices
|
||||
|
||||
|
||||
def select_research_depth() -> int:
|
||||
"""Select research depth using an interactive selection."""
|
||||
|
||||
# Define research depth options with their corresponding values
|
||||
DEPTH_OPTIONS = [
|
||||
("Shallow - Quick research, few debate and strategy discussion rounds", 1),
|
||||
("Medium - Middle ground, moderate debate rounds and strategy discussion", 3),
|
||||
("Deep - Comprehensive research, in depth debate and strategy discussion", 5),
|
||||
]
|
||||
|
||||
choice = questionary.select(
|
||||
"Select Your [Research Depth]:",
|
||||
choices=[
|
||||
questionary.Choice(display, value=value) for display, value in DEPTH_OPTIONS
|
||||
],
|
||||
instruction="\n- Use arrow keys to navigate\n- Press Enter to select",
|
||||
style=questionary.Style(
|
||||
[
|
||||
("selected", "fg:yellow noinherit"),
|
||||
("highlighted", "fg:yellow noinherit"),
|
||||
("pointer", "fg:yellow noinherit"),
|
||||
]
|
||||
),
|
||||
).ask()
|
||||
|
||||
if choice is None:
|
||||
console.print("\n[red]No research depth selected. Exiting...[/red]")
|
||||
exit(1)
|
||||
|
||||
return choice
|
||||
|
||||
|
||||
def select_shallow_thinking_agent(provider) -> str:
|
||||
"""Select shallow thinking llm engine using an interactive selection."""
|
||||
|
||||
# Define shallow thinking llm engine options with their corresponding model names
|
||||
SHALLOW_AGENT_OPTIONS = {
|
||||
"openai": [
|
||||
("GPT-5 Mini - Cost-optimized reasoning", "gpt-5-mini"),
|
||||
("GPT-5 Nano - Ultra-fast, high-throughput", "gpt-5-nano"),
|
||||
("GPT-5.2 - Latest flagship", "gpt-5.2"),
|
||||
("GPT-5.1 - Flexible reasoning", "gpt-5.1"),
|
||||
("GPT-4.1 - Smartest non-reasoning, 1M context", "gpt-4.1"),
|
||||
],
|
||||
"anthropic": [
|
||||
("Claude Haiku 4.5 - Fast + extended thinking", "claude-haiku-4-5"),
|
||||
("Claude Sonnet 4.5 - Best for agents/coding", "claude-sonnet-4-5"),
|
||||
("Claude Sonnet 4 - High-performance", "claude-sonnet-4-20250514"),
|
||||
],
|
||||
"google": [
|
||||
("Gemini 3 Flash - Next-gen fast", "gemini-3-flash-preview"),
|
||||
("Gemini 2.5 Flash - Balanced, recommended", "gemini-2.5-flash"),
|
||||
("Gemini 3 Pro - Reasoning-first", "gemini-3-pro-preview"),
|
||||
("Gemini 2.5 Flash Lite - Fast, low-cost", "gemini-2.5-flash-lite"),
|
||||
],
|
||||
"xai": [
|
||||
("Grok 4.1 Fast (Non-Reasoning) - Speed optimized, 2M ctx", "grok-4-1-fast-non-reasoning"),
|
||||
("Grok 4 Fast (Non-Reasoning) - Speed optimized", "grok-4-fast-non-reasoning"),
|
||||
("Grok 4.1 Fast (Reasoning) - High-performance, 2M ctx", "grok-4-1-fast-reasoning"),
|
||||
("Grok 4 Fast (Reasoning) - High-performance", "grok-4-fast-reasoning"),
|
||||
],
|
||||
"openrouter": [
|
||||
("NVIDIA Nemotron 3 Nano 30B (free)", "nvidia/nemotron-3-nano-30b-a3b:free"),
|
||||
("Z.AI GLM 4.5 Air (free)", "z-ai/glm-4.5-air:free"),
|
||||
],
|
||||
"ollama": [
|
||||
("Qwen3:latest (8B, local)", "qwen3:latest"),
|
||||
("GPT-OSS:latest (20B, local)", "gpt-oss:latest"),
|
||||
("GLM-4.7-Flash:latest (30B, local)", "glm-4.7-flash:latest"),
|
||||
],
|
||||
}
|
||||
|
||||
choice = questionary.select(
|
||||
"Select Your [Quick-Thinking LLM Engine]:",
|
||||
choices=[
|
||||
questionary.Choice(display, value=value)
|
||||
for display, value in SHALLOW_AGENT_OPTIONS[provider.lower()]
|
||||
],
|
||||
instruction="\n- Use arrow keys to navigate\n- Press Enter to select",
|
||||
style=questionary.Style(
|
||||
[
|
||||
("selected", "fg:magenta noinherit"),
|
||||
("highlighted", "fg:magenta noinherit"),
|
||||
("pointer", "fg:magenta noinherit"),
|
||||
]
|
||||
),
|
||||
).ask()
|
||||
|
||||
if choice is None:
|
||||
console.print(
|
||||
"\n[red]No shallow thinking llm engine selected. Exiting...[/red]"
|
||||
)
|
||||
exit(1)
|
||||
|
||||
return choice
|
||||
|
||||
|
||||
def select_deep_thinking_agent(provider) -> str:
|
||||
"""Select deep thinking llm engine using an interactive selection."""
|
||||
|
||||
# Define deep thinking llm engine options with their corresponding model names
|
||||
DEEP_AGENT_OPTIONS = {
|
||||
"openai": [
|
||||
("GPT-5.2 - Latest flagship", "gpt-5.2"),
|
||||
("GPT-5.1 - Flexible reasoning", "gpt-5.1"),
|
||||
("GPT-5 - Advanced reasoning", "gpt-5"),
|
||||
("GPT-4.1 - Smartest non-reasoning, 1M context", "gpt-4.1"),
|
||||
("GPT-5 Mini - Cost-optimized reasoning", "gpt-5-mini"),
|
||||
("GPT-5 Nano - Ultra-fast, high-throughput", "gpt-5-nano"),
|
||||
],
|
||||
"anthropic": [
|
||||
("Claude Sonnet 4.5 - Best for agents/coding", "claude-sonnet-4-5"),
|
||||
("Claude Opus 4.5 - Premium, max intelligence", "claude-opus-4-5"),
|
||||
("Claude Opus 4.1 - Most capable model", "claude-opus-4-1-20250805"),
|
||||
("Claude Haiku 4.5 - Fast + extended thinking", "claude-haiku-4-5"),
|
||||
("Claude Sonnet 4 - High-performance", "claude-sonnet-4-20250514"),
|
||||
],
|
||||
"google": [
|
||||
("Gemini 3 Pro - Reasoning-first", "gemini-3-pro-preview"),
|
||||
("Gemini 3 Flash - Next-gen fast", "gemini-3-flash-preview"),
|
||||
("Gemini 2.5 Flash - Balanced, recommended", "gemini-2.5-flash"),
|
||||
],
|
||||
"xai": [
|
||||
("Grok 4.1 Fast (Reasoning) - High-performance, 2M ctx", "grok-4-1-fast-reasoning"),
|
||||
("Grok 4 Fast (Reasoning) - High-performance", "grok-4-fast-reasoning"),
|
||||
("Grok 4 - Flagship model", "grok-4-0709"),
|
||||
("Grok 4.1 Fast (Non-Reasoning) - Speed optimized, 2M ctx", "grok-4-1-fast-non-reasoning"),
|
||||
("Grok 4 Fast (Non-Reasoning) - Speed optimized", "grok-4-fast-non-reasoning"),
|
||||
],
|
||||
"openrouter": [
|
||||
("Z.AI GLM 4.5 Air (free)", "z-ai/glm-4.5-air:free"),
|
||||
("NVIDIA Nemotron 3 Nano 30B (free)", "nvidia/nemotron-3-nano-30b-a3b:free"),
|
||||
],
|
||||
"ollama": [
|
||||
("GLM-4.7-Flash:latest (30B, local)", "glm-4.7-flash:latest"),
|
||||
("GPT-OSS:latest (20B, local)", "gpt-oss:latest"),
|
||||
("Qwen3:latest (8B, local)", "qwen3:latest"),
|
||||
],
|
||||
}
|
||||
|
||||
choice = questionary.select(
|
||||
"Select Your [Deep-Thinking LLM Engine]:",
|
||||
choices=[
|
||||
questionary.Choice(display, value=value)
|
||||
for display, value in DEEP_AGENT_OPTIONS[provider.lower()]
|
||||
],
|
||||
instruction="\n- Use arrow keys to navigate\n- Press Enter to select",
|
||||
style=questionary.Style(
|
||||
[
|
||||
("selected", "fg:magenta noinherit"),
|
||||
("highlighted", "fg:magenta noinherit"),
|
||||
("pointer", "fg:magenta noinherit"),
|
||||
]
|
||||
),
|
||||
).ask()
|
||||
|
||||
if choice is None:
|
||||
console.print("\n[red]No deep thinking llm engine selected. Exiting...[/red]")
|
||||
exit(1)
|
||||
|
||||
return choice
|
||||
|
||||
def select_llm_provider() -> tuple[str, str]:
    """Interactively select the LLM provider and its API base URL.

    Returns:
        (display_name, base_url) for the chosen provider.

    Exits the process with status 1 if the prompt is dismissed.
    """
    # Provider display names with their corresponding API endpoints.
    BASE_URLS = [
        ("OpenAI", "https://api.openai.com/v1"),
        ("Google", "https://generativelanguage.googleapis.com/v1"),
        ("Anthropic", "https://api.anthropic.com/"),
        ("xAI", "https://api.x.ai/v1"),
        ("Openrouter", "https://openrouter.ai/api/v1"),
        ("Ollama", "http://localhost:11434/v1"),
    ]

    choice = questionary.select(
        "Select your LLM Provider:",
        choices=[
            questionary.Choice(display, value=(display, value))
            for display, value in BASE_URLS
        ],
        instruction="\n- Use arrow keys to navigate\n- Press Enter to select",
        style=questionary.Style(
            [
                ("selected", "fg:magenta noinherit"),
                ("highlighted", "fg:magenta noinherit"),
                ("pointer", "fg:magenta noinherit"),
            ]
        ),
    ).ask()

    if choice is None:
        # Message fixed: the menu covers all providers, not only OpenAI.
        console.print("\n[red]No LLM provider selected. Exiting...[/red]")
        exit(1)

    display_name, url = choice
    print(f"You selected: {display_name}\tURL: {url}")

    return display_name, url
|
||||
|
||||
|
||||
def ask_openai_reasoning_effort() -> str:
    """Prompt the user for the OpenAI reasoning-effort level.

    Returns one of "medium", "high", or "low" (or None if dismissed).
    """
    effort_levels = (
        ("Medium (Default)", "medium"),
        ("High (More thorough)", "high"),
        ("Low (Faster)", "low"),
    )
    cyan = "fg:cyan noinherit"
    return questionary.select(
        "Select Reasoning Effort:",
        choices=[questionary.Choice(label, level) for label, level in effort_levels],
        style=questionary.Style(
            [(part, cyan) for part in ("selected", "highlighted", "pointer")]
        ),
    ).ask()
|
||||
|
||||
|
||||
def ask_gemini_thinking_config() -> str | None:
    """Prompt the user for the Gemini thinking mode.

    Returns thinking_level: "high" or "minimal".
    Client maps to appropriate API param based on model series.
    """
    green = "fg:green noinherit"
    options = [
        questionary.Choice("Enable Thinking (recommended)", "high"),
        questionary.Choice("Minimal/Disable Thinking", "minimal"),
    ]
    return questionary.select(
        "Select Thinking Mode:",
        choices=options,
        style=questionary.Style(
            [(part, green) for part in ("selected", "highlighted", "pointer")]
        ),
    ).ask()
|
||||
|
|
|
|||
|
|
@ -1,67 +1,67 @@
|
|||
# TradingAgents Chainlit Web UI — Design
|
||||
|
||||
## Summary
|
||||
|
||||
Add a Chainlit web UI to TradingAgents so it can be deployed on Railway as a web service. Users interact via chat messages (e.g., "Analyze NVDA") and see live agent progress streamed into the browser.
|
||||
|
||||
## Architecture
|
||||
|
||||
Thin Chainlit wrapper around the existing `TradingAgentsGraph` programmatic API. ~150 lines of new code in a single `app.py`.
|
||||
|
||||
## Components
|
||||
|
||||
### `app.py` (Chainlit entry point)
|
||||
|
||||
- `@cl.on_chat_start` — Welcome message explaining usage (e.g., "Type a ticker like `NVDA` or `Analyze AAPL 2024-12-01`")
|
||||
- `@cl.on_message` — Parse ticker + optional date from user message, create `TradingAgentsGraph` with Anthropic config, run `propagate()` in debug mode, stream Chainlit `Step` messages for each agent phase, send final decision as formatted message
|
||||
|
||||
### `Dockerfile`
|
||||
|
||||
- Python 3.13-slim base
|
||||
- Install requirements.txt
|
||||
- Expose `$PORT`
|
||||
- `CMD: chainlit run app.py --host 0.0.0.0 --port $PORT`
|
||||
|
||||
### `railway.toml`
|
||||
|
||||
- Build from Dockerfile
|
||||
- Health check on `/`
|
||||
|
||||
### Railway Environment Variables
|
||||
|
||||
- `ANTHROPIC_API_KEY` — required, for Claude models
|
||||
- `PORT` — auto-set by Railway
|
||||
|
||||
## LLM Configuration
|
||||
|
||||
- Provider: `anthropic`
|
||||
- Quick-think model: `claude-haiku-4-5-20251001`
|
||||
- Deep-think model: `claude-sonnet-4-5-20241022`
|
||||
- Data vendor: `yfinance` (no extra API keys needed)
|
||||
|
||||
## Data Flow
|
||||
|
||||
```
|
||||
User message: "Analyze NVDA"
|
||||
-> Parse: ticker=NVDA, date=today
|
||||
-> TradingAgentsGraph(config={anthropic, haiku/sonnet})
|
||||
-> graph.propagate("NVDA", "2026-02-20")
|
||||
-> Debug stream chunks
|
||||
-> Each chunk -> Chainlit Step (Analyst, Research, Trading, Risk, Portfolio)
|
||||
-> Final decision -> formatted Chainlit message with markdown
|
||||
```
|
||||
|
||||
## Message Parsing
|
||||
|
||||
Simple regex/string parsing:
|
||||
- `"NVDA"` -> ticker=NVDA, date=today
|
||||
- `"Analyze AAPL 2024-12-01"` -> ticker=AAPL, date=2024-12-01
|
||||
- `"What's the outlook for TSLA?"` -> ticker=TSLA, date=today
|
||||
- Extract uppercase 1-5 letter words as potential tickers
|
||||
|
||||
## Deployment
|
||||
|
||||
1. Push changes to `github.com/dtarkent2-sys/TradingAgents` main branch
|
||||
2. Create Railway service from GitHub repo
|
||||
3. Set `ANTHROPIC_API_KEY` env var
|
||||
4. Railway auto-deploys, Chainlit serves on assigned PORT
|
||||
# TradingAgents Chainlit Web UI — Design
|
||||
|
||||
## Summary
|
||||
|
||||
Add a Chainlit web UI to TradingAgents so it can be deployed on Railway as a web service. Users interact via chat messages (e.g., "Analyze NVDA") and see live agent progress streamed into the browser.
|
||||
|
||||
## Architecture
|
||||
|
||||
Thin Chainlit wrapper around the existing `TradingAgentsGraph` programmatic API. ~150 lines of new code in a single `app.py`.
|
||||
|
||||
## Components
|
||||
|
||||
### `app.py` (Chainlit entry point)
|
||||
|
||||
- `@cl.on_chat_start` — Welcome message explaining usage (e.g., "Type a ticker like `NVDA` or `Analyze AAPL 2024-12-01`")
|
||||
- `@cl.on_message` — Parse ticker + optional date from user message, create `TradingAgentsGraph` with Anthropic config, run `propagate()` in debug mode, stream Chainlit `Step` messages for each agent phase, send final decision as formatted message
|
||||
|
||||
### `Dockerfile`
|
||||
|
||||
- Python 3.13-slim base
|
||||
- Install requirements.txt
|
||||
- Expose `$PORT`
|
||||
- `CMD: chainlit run app.py --host 0.0.0.0 --port $PORT`
|
||||
|
||||
### `railway.toml`
|
||||
|
||||
- Build from Dockerfile
|
||||
- Health check on `/`
|
||||
|
||||
### Railway Environment Variables
|
||||
|
||||
- `ANTHROPIC_API_KEY` — required, for Claude models
|
||||
- `PORT` — auto-set by Railway
|
||||
|
||||
## LLM Configuration
|
||||
|
||||
- Provider: `anthropic`
|
||||
- Quick-think model: `claude-haiku-4-5-20251001`
|
||||
- Deep-think model: `claude-sonnet-4-5-20241022`
|
||||
- Data vendor: `yfinance` (no extra API keys needed)
|
||||
|
||||
## Data Flow
|
||||
|
||||
```
|
||||
User message: "Analyze NVDA"
|
||||
-> Parse: ticker=NVDA, date=today
|
||||
-> TradingAgentsGraph(config={anthropic, haiku/sonnet})
|
||||
-> graph.propagate("NVDA", "2026-02-20")
|
||||
-> Debug stream chunks
|
||||
-> Each chunk -> Chainlit Step (Analyst, Research, Trading, Risk, Portfolio)
|
||||
-> Final decision -> formatted Chainlit message with markdown
|
||||
```
|
||||
|
||||
## Message Parsing
|
||||
|
||||
Simple regex/string parsing:
|
||||
- `"NVDA"` -> ticker=NVDA, date=today
|
||||
- `"Analyze AAPL 2024-12-01"` -> ticker=AAPL, date=2024-12-01
|
||||
- `"What's the outlook for TSLA?"` -> ticker=TSLA, date=today
|
||||
- Extract uppercase 1-5 letter words as potential tickers
|
||||
|
||||
## Deployment
|
||||
|
||||
1. Push changes to `github.com/dtarkent2-sys/TradingAgents` main branch
|
||||
2. Create Railway service from GitHub repo
|
||||
3. Set `ANTHROPIC_API_KEY` env var
|
||||
4. Railway auto-deploys, Chainlit serves on assigned PORT
|
||||
|
|
|
|||
66
main.py
66
main.py
|
|
@ -1,33 +1,33 @@
|
|||
from tradingagents.graph.trading_graph import TradingAgentsGraph
from tradingagents.default_config import DEFAULT_CONFIG

import os

from dotenv import load_dotenv

# Read API keys and model overrides from a local .env file.
load_dotenv()

# Start from the library defaults and override the knobs we care about.
config = DEFAULT_CONFIG.copy()
config["deep_think_llm"] = os.environ.get("DEEP_THINK_LLM", "gpt-5-mini")
config["quick_think_llm"] = os.environ.get("QUICK_THINK_LLM", "gpt-5-mini")
config["max_debate_rounds"] = 1  # one bull/bear debate round

# Serve every data category via yfinance, which needs no extra API keys
# (alpha_vantage is the alternative vendor for each entry).
config["data_vendors"] = {
    category: "yfinance"
    for category in (
        "core_stock_apis",
        "technical_indicators",
        "fundamental_data",
        "news_data",
    )
}

# Build the agent graph; debug=True streams intermediate steps.
ta = TradingAgentsGraph(debug=True, config=config)

# Run one full analysis pass for NVDA on the given trade date.
_, decision = ta.propagate("NVDA", "2024-05-10")
print(decision)

# Memorize mistakes and reflect
# ta.reflect_and_remember(1000)  # parameter is the position returns
||||
from tradingagents.graph.trading_graph import TradingAgentsGraph
from tradingagents.default_config import DEFAULT_CONFIG

import os

from dotenv import load_dotenv

# Read API keys and model overrides from a local .env file.
load_dotenv()

# Start from the library defaults and override the knobs we care about.
config = DEFAULT_CONFIG.copy()
config["deep_think_llm"] = os.environ.get("DEEP_THINK_LLM", "gpt-5-mini")
config["quick_think_llm"] = os.environ.get("QUICK_THINK_LLM", "gpt-5-mini")
config["max_debate_rounds"] = 1  # one bull/bear debate round

# Serve every data category via yfinance, which needs no extra API keys
# (alpha_vantage is the alternative vendor for each entry).
config["data_vendors"] = {
    category: "yfinance"
    for category in (
        "core_stock_apis",
        "technical_indicators",
        "fundamental_data",
        "news_data",
    )
}

# Build the agent graph; debug=True streams intermediate steps.
ta = TradingAgentsGraph(debug=True, config=config)

# Run one full analysis pass for NVDA on the given trade date.
_, decision = ta.propagate("NVDA", "2024-05-10")
print(decision)

# Memorize mistakes and reflect
# ta.reflect_and_remember(1000)  # parameter is the position returns
||||
|
|
|
|||
3404
nvda_output.txt
3404
nvda_output.txt
File diff suppressed because it is too large
Load Diff
3790
nvda_output2.txt
3790
nvda_output2.txt
File diff suppressed because it is too large
Load Diff
|
|
@ -1,36 +1,36 @@
|
|||
[build-system]
|
||||
requires = ["setuptools>=61.0"]
|
||||
build-backend = "setuptools.build_meta"
|
||||
|
||||
[project]
|
||||
name = "tradingagents"
|
||||
version = "0.2.0"
|
||||
description = "TradingAgents: Multi-Agents LLM Financial Trading Framework"
|
||||
readme = "README.md"
|
||||
requires-python = ">=3.10"
|
||||
dependencies = [
|
||||
"langchain-core>=0.3.81",
|
||||
"langchain-anthropic>=0.3.15",
|
||||
"langchain-experimental>=0.3.4",
|
||||
"langchain-google-genai>=2.1.5",
|
||||
"langchain-openai>=0.3.23",
|
||||
"langgraph>=0.4.8",
|
||||
"pandas>=2.3.0",
|
||||
"pytz>=2025.2",
|
||||
"rank-bm25>=0.2.2",
|
||||
"requests>=2.32.4",
|
||||
"setuptools>=80.9.0",
|
||||
"stockstats>=0.6.5",
|
||||
"tqdm>=4.67.1",
|
||||
"typing-extensions>=4.14.0",
|
||||
"yfinance>=0.2.63",
|
||||
"fastapi>=0.115.0",
|
||||
"uvicorn[standard]>=0.30.0",
|
||||
"sse-starlette>=2.0.0",
|
||||
]
|
||||
|
||||
[project.scripts]
|
||||
tradingagents = "cli.main:app"
|
||||
|
||||
[tool.setuptools.packages.find]
|
||||
include = ["tradingagents*", "cli*"]
|
||||
[build-system]
|
||||
requires = ["setuptools>=61.0"]
|
||||
build-backend = "setuptools.build_meta"
|
||||
|
||||
[project]
|
||||
name = "tradingagents"
|
||||
version = "0.2.0"
|
||||
description = "TradingAgents: Multi-Agents LLM Financial Trading Framework"
|
||||
readme = "README.md"
|
||||
requires-python = ">=3.10"
|
||||
dependencies = [
|
||||
"langchain-core>=0.3.81",
|
||||
"langchain-anthropic>=0.3.15",
|
||||
"langchain-experimental>=0.3.4",
|
||||
"langchain-google-genai>=2.1.5",
|
||||
"langchain-openai>=0.3.23",
|
||||
"langgraph>=0.4.8",
|
||||
"pandas>=2.3.0",
|
||||
"pytz>=2025.2",
|
||||
"rank-bm25>=0.2.2",
|
||||
"requests>=2.32.4",
|
||||
"setuptools>=80.9.0",
|
||||
"stockstats>=0.6.5",
|
||||
"tqdm>=4.67.1",
|
||||
"typing-extensions>=4.14.0",
|
||||
"yfinance>=0.2.63",
|
||||
"fastapi>=0.115.0",
|
||||
"uvicorn[standard]>=0.30.0",
|
||||
"sse-starlette>=2.0.0",
|
||||
]
|
||||
|
||||
[project.scripts]
|
||||
tradingagents = "cli.main:app"
|
||||
|
||||
[tool.setuptools.packages.find]
|
||||
include = ["tradingagents*", "cli*"]
|
||||
|
|
|
|||
22
test.py
22
test.py
|
|
@ -1,11 +1,11 @@
|
|||
import time
from tradingagents.dataflows.y_finance import (
    get_YFin_data_online,
    get_stock_stats_indicators_window,
    get_balance_sheet as get_yfinance_balance_sheet,
    get_cashflow as get_yfinance_cashflow,
    get_income_statement as get_yfinance_income_statement,
    get_insider_transactions as get_yfinance_insider_transactions,
)

# Smoke-test the indicator-window helper and report how long it takes.
print("Testing optimized implementation with 30-day lookback:")
t0 = time.time()
result = get_stock_stats_indicators_window("AAPL", "macd", "2024-11-01", 30)
elapsed = time.time() - t0

print(f"Execution time: {elapsed:.2f} seconds")
print(f"Result length: {len(result)} characters")
print(result)
||||
import time
from tradingagents.dataflows.y_finance import (
    get_YFin_data_online,
    get_stock_stats_indicators_window,
    get_balance_sheet as get_yfinance_balance_sheet,
    get_cashflow as get_yfinance_cashflow,
    get_income_statement as get_yfinance_income_statement,
    get_insider_transactions as get_yfinance_insider_transactions,
)

# Smoke-test the indicator-window helper and report how long it takes.
print("Testing optimized implementation with 30-day lookback:")
t0 = time.time()
result = get_stock_stats_indicators_window("AAPL", "macd", "2024-11-01", 30)
elapsed = time.time() - t0

print(f"Execution time: {elapsed:.2f} seconds")
print(f"Result length: {len(result)} characters")
print(result)
|
||||
|
|
|
|||
|
|
@ -1,40 +1,40 @@
|
|||
from .utils.agent_utils import create_msg_delete
|
||||
from .utils.agent_states import AgentState, InvestDebateState, RiskDebateState
|
||||
from .utils.memory import FinancialSituationMemory
|
||||
|
||||
from .analysts.fundamentals_analyst import create_fundamentals_analyst
|
||||
from .analysts.market_analyst import create_market_analyst
|
||||
from .analysts.news_analyst import create_news_analyst
|
||||
from .analysts.social_media_analyst import create_social_media_analyst
|
||||
|
||||
from .researchers.bear_researcher import create_bear_researcher
|
||||
from .researchers.bull_researcher import create_bull_researcher
|
||||
|
||||
from .risk_mgmt.aggressive_debator import create_aggressive_debator
|
||||
from .risk_mgmt.conservative_debator import create_conservative_debator
|
||||
from .risk_mgmt.neutral_debator import create_neutral_debator
|
||||
|
||||
from .managers.research_manager import create_research_manager
|
||||
from .managers.risk_manager import create_risk_manager
|
||||
|
||||
from .trader.trader import create_trader
|
||||
|
||||
__all__ = [
|
||||
"FinancialSituationMemory",
|
||||
"AgentState",
|
||||
"create_msg_delete",
|
||||
"InvestDebateState",
|
||||
"RiskDebateState",
|
||||
"create_bear_researcher",
|
||||
"create_bull_researcher",
|
||||
"create_research_manager",
|
||||
"create_fundamentals_analyst",
|
||||
"create_market_analyst",
|
||||
"create_neutral_debator",
|
||||
"create_news_analyst",
|
||||
"create_aggressive_debator",
|
||||
"create_risk_manager",
|
||||
"create_conservative_debator",
|
||||
"create_social_media_analyst",
|
||||
"create_trader",
|
||||
]
|
||||
from .utils.agent_utils import create_msg_delete
|
||||
from .utils.agent_states import AgentState, InvestDebateState, RiskDebateState
|
||||
from .utils.memory import FinancialSituationMemory
|
||||
|
||||
from .analysts.fundamentals_analyst import create_fundamentals_analyst
|
||||
from .analysts.market_analyst import create_market_analyst
|
||||
from .analysts.news_analyst import create_news_analyst
|
||||
from .analysts.social_media_analyst import create_social_media_analyst
|
||||
|
||||
from .researchers.bear_researcher import create_bear_researcher
|
||||
from .researchers.bull_researcher import create_bull_researcher
|
||||
|
||||
from .risk_mgmt.aggressive_debator import create_aggressive_debator
|
||||
from .risk_mgmt.conservative_debator import create_conservative_debator
|
||||
from .risk_mgmt.neutral_debator import create_neutral_debator
|
||||
|
||||
from .managers.research_manager import create_research_manager
|
||||
from .managers.risk_manager import create_risk_manager
|
||||
|
||||
from .trader.trader import create_trader
|
||||
|
||||
__all__ = [
|
||||
"FinancialSituationMemory",
|
||||
"AgentState",
|
||||
"create_msg_delete",
|
||||
"InvestDebateState",
|
||||
"RiskDebateState",
|
||||
"create_bear_researcher",
|
||||
"create_bull_researcher",
|
||||
"create_research_manager",
|
||||
"create_fundamentals_analyst",
|
||||
"create_market_analyst",
|
||||
"create_neutral_debator",
|
||||
"create_news_analyst",
|
||||
"create_aggressive_debator",
|
||||
"create_risk_manager",
|
||||
"create_conservative_debator",
|
||||
"create_social_media_analyst",
|
||||
"create_trader",
|
||||
]
|
||||
|
|
|
|||
|
|
@ -1,63 +1,63 @@
|
|||
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
import time
import json
from tradingagents.agents.utils.agent_utils import get_fundamentals, get_balance_sheet, get_cashflow, get_income_statement, get_insider_transactions
from tradingagents.dataflows.config import get_config


def create_fundamentals_analyst(llm):
    """Build the fundamentals-analyst node for the agent graph.

    Args:
        llm: A LangChain chat model that supports ``bind_tools``.

    Returns:
        A node callable that reads ``trade_date`` and ``company_of_interest``
        from the graph state, prompts the LLM with the fundamentals toolset,
        and returns the model message plus a ``fundamentals_report`` string
        (empty while the model is still issuing tool calls).
    """

    def fundamentals_analyst_node(state):
        current_date = state["trade_date"]
        ticker = state["company_of_interest"]

        tools = [
            get_fundamentals,
            get_balance_sheet,
            get_cashflow,
            get_income_statement,
        ]

        # BUG FIX: the previous version had a trailing comma after the last
        # concatenated string, which made system_message a 1-tuple; its repr
        # (parentheses and quotes included) was then substituted into the
        # prompt template instead of the plain text.
        system_message = (
            "You are a researcher tasked with analyzing fundamental information over the past week about a company. Please write a comprehensive report of the company's fundamental information such as financial documents, company profile, basic company financials, and company financial history to gain a full view of the company's fundamental information to inform traders. Make sure to include as much detail as possible. Do not simply state the trends are mixed, provide detailed and finegrained analysis and insights that may help traders make decisions."
            + " Make sure to append a Markdown table at the end of the report to organize key points in the report, organized and easy to read."
            + " Use the available tools: `get_fundamentals` for comprehensive company analysis, `get_balance_sheet`, `get_cashflow`, and `get_income_statement` for specific financial statements."
        )

        prompt = ChatPromptTemplate.from_messages(
            [
                (
                    "system",
                    "You are a helpful AI assistant, collaborating with other assistants."
                    " Use the provided tools to progress towards answering the question."
                    " If you are unable to fully answer, that's OK; another assistant with different tools"
                    " will help where you left off. Execute what you can to make progress."
                    " If you or any other assistant has the FINAL TRANSACTION PROPOSAL: **BUY/HOLD/SELL** or deliverable,"
                    " prefix your response with FINAL TRANSACTION PROPOSAL: **BUY/HOLD/SELL** so the team knows to stop."
                    " You have access to the following tools: {tool_names}.\n{system_message}"
                    "For your reference, the current date is {current_date}. The company we want to look at is {ticker}",
                ),
                MessagesPlaceholder(variable_name="messages"),
            ]
        )

        # Fill in the template variables that are fixed for this invocation.
        prompt = prompt.partial(system_message=system_message)
        prompt = prompt.partial(tool_names=", ".join([tool.name for tool in tools]))
        prompt = prompt.partial(current_date=current_date)
        prompt = prompt.partial(ticker=ticker)

        chain = prompt | llm.bind_tools(tools)

        result = chain.invoke(state["messages"])

        # A response with no tool calls is the model's final answer and
        # becomes the report; otherwise the graph loops to execute tools.
        report = ""

        if len(result.tool_calls) == 0:
            report = result.content

        return {
            "messages": [result],
            "fundamentals_report": report,
        }

    return fundamentals_analyst_node
|
||||
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
import time
import json
from tradingagents.agents.utils.agent_utils import get_fundamentals, get_balance_sheet, get_cashflow, get_income_statement, get_insider_transactions
from tradingagents.dataflows.config import get_config


def create_fundamentals_analyst(llm):
    """Build the fundamentals-analyst node for the agent graph.

    Args:
        llm: A LangChain chat model that supports ``bind_tools``.

    Returns:
        A node callable that reads ``trade_date`` and ``company_of_interest``
        from the graph state, prompts the LLM with the fundamentals toolset,
        and returns the model message plus a ``fundamentals_report`` string
        (empty while the model is still issuing tool calls).
    """

    def fundamentals_analyst_node(state):
        current_date = state["trade_date"]
        ticker = state["company_of_interest"]

        tools = [
            get_fundamentals,
            get_balance_sheet,
            get_cashflow,
            get_income_statement,
        ]

        # BUG FIX: the previous version had a trailing comma after the last
        # concatenated string, which made system_message a 1-tuple; its repr
        # (parentheses and quotes included) was then substituted into the
        # prompt template instead of the plain text.
        system_message = (
            "You are a researcher tasked with analyzing fundamental information over the past week about a company. Please write a comprehensive report of the company's fundamental information such as financial documents, company profile, basic company financials, and company financial history to gain a full view of the company's fundamental information to inform traders. Make sure to include as much detail as possible. Do not simply state the trends are mixed, provide detailed and finegrained analysis and insights that may help traders make decisions."
            + " Make sure to append a Markdown table at the end of the report to organize key points in the report, organized and easy to read."
            + " Use the available tools: `get_fundamentals` for comprehensive company analysis, `get_balance_sheet`, `get_cashflow`, and `get_income_statement` for specific financial statements."
        )

        prompt = ChatPromptTemplate.from_messages(
            [
                (
                    "system",
                    "You are a helpful AI assistant, collaborating with other assistants."
                    " Use the provided tools to progress towards answering the question."
                    " If you are unable to fully answer, that's OK; another assistant with different tools"
                    " will help where you left off. Execute what you can to make progress."
                    " If you or any other assistant has the FINAL TRANSACTION PROPOSAL: **BUY/HOLD/SELL** or deliverable,"
                    " prefix your response with FINAL TRANSACTION PROPOSAL: **BUY/HOLD/SELL** so the team knows to stop."
                    " You have access to the following tools: {tool_names}.\n{system_message}"
                    "For your reference, the current date is {current_date}. The company we want to look at is {ticker}",
                ),
                MessagesPlaceholder(variable_name="messages"),
            ]
        )

        # Fill in the template variables that are fixed for this invocation.
        prompt = prompt.partial(system_message=system_message)
        prompt = prompt.partial(tool_names=", ".join([tool.name for tool in tools]))
        prompt = prompt.partial(current_date=current_date)
        prompt = prompt.partial(ticker=ticker)

        chain = prompt | llm.bind_tools(tools)

        result = chain.invoke(state["messages"])

        # A response with no tool calls is the model's final answer and
        # becomes the report; otherwise the graph loops to execute tools.
        report = ""

        if len(result.tool_calls) == 0:
            report = result.content

        return {
            "messages": [result],
            "fundamentals_report": report,
        }

    return fundamentals_analyst_node
||||
|
|
|
|||
|
|
@ -1,85 +1,85 @@
|
|||
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
import time
import json
from tradingagents.agents.utils.agent_utils import get_stock_data, get_indicators
from tradingagents.dataflows.config import get_config


def create_market_analyst(llm):
    """Build the market (technical) analyst node for the agent graph.

    Args:
        llm: A LangChain chat model that supports ``bind_tools``.

    Returns:
        A node callable that reads ``trade_date`` and ``company_of_interest``
        from the graph state, prompts the LLM with the price/indicator
        toolset, and returns the model message plus a ``market_report``
        string (empty while the model is still issuing tool calls).
    """

    def market_analyst_node(state):
        current_date = state["trade_date"]
        ticker = state["company_of_interest"]

        tools = [
            get_stock_data,
            get_indicators,
        ]

        system_message = (
            """You are a trading assistant tasked with analyzing financial markets. Your role is to select the **most relevant indicators** for a given market condition or trading strategy from the following list. The goal is to choose up to **8 indicators** that provide complementary insights without redundancy. Categories and each category's indicators are:

Moving Averages:
- close_50_sma: 50 SMA: A medium-term trend indicator. Usage: Identify trend direction and serve as dynamic support/resistance. Tips: It lags price; combine with faster indicators for timely signals.
- close_200_sma: 200 SMA: A long-term trend benchmark. Usage: Confirm overall market trend and identify golden/death cross setups. Tips: It reacts slowly; best for strategic trend confirmation rather than frequent trading entries.
- close_10_ema: 10 EMA: A responsive short-term average. Usage: Capture quick shifts in momentum and potential entry points. Tips: Prone to noise in choppy markets; use alongside longer averages for filtering false signals.

MACD Related:
- macd: MACD: Computes momentum via differences of EMAs. Usage: Look for crossovers and divergence as signals of trend changes. Tips: Confirm with other indicators in low-volatility or sideways markets.
- macds: MACD Signal: An EMA smoothing of the MACD line. Usage: Use crossovers with the MACD line to trigger trades. Tips: Should be part of a broader strategy to avoid false positives.
- macdh: MACD Histogram: Shows the gap between the MACD line and its signal. Usage: Visualize momentum strength and spot divergence early. Tips: Can be volatile; complement with additional filters in fast-moving markets.

Momentum Indicators:
- rsi: RSI: Measures momentum to flag overbought/oversold conditions. Usage: Apply 70/30 thresholds and watch for divergence to signal reversals. Tips: In strong trends, RSI may remain extreme; always cross-check with trend analysis.

Volatility Indicators:
- boll: Bollinger Middle: A 20 SMA serving as the basis for Bollinger Bands. Usage: Acts as a dynamic benchmark for price movement. Tips: Combine with the upper and lower bands to effectively spot breakouts or reversals.
- boll_ub: Bollinger Upper Band: Typically 2 standard deviations above the middle line. Usage: Signals potential overbought conditions and breakout zones. Tips: Confirm signals with other tools; prices may ride the band in strong trends.
- boll_lb: Bollinger Lower Band: Typically 2 standard deviations below the middle line. Usage: Indicates potential oversold conditions. Tips: Use additional analysis to avoid false reversal signals.
- atr: ATR: Averages true range to measure volatility. Usage: Set stop-loss levels and adjust position sizes based on current market volatility. Tips: It's a reactive measure, so use it as part of a broader risk management strategy.

Volume-Based Indicators:
- vwma: VWMA: A moving average weighted by volume. Usage: Confirm trends by integrating price action with volume data. Tips: Watch for skewed results from volume spikes; use in combination with other volume analyses.

- Select indicators that provide diverse and complementary information. Avoid redundancy (e.g., do not select both rsi and stochrsi). Also briefly explain why they are suitable for the given market context. When you tool call, please use the exact name of the indicators provided above as they are defined parameters, otherwise your call will fail. Please make sure to call get_stock_data first to retrieve the CSV that is needed to generate indicators. Then use get_indicators with the specific indicator names. Write a very detailed and nuanced report of the trends you observe. Do not simply state the trends are mixed, provide detailed and finegrained analysis and insights that may help traders make decisions."""
            + """ Make sure to append a Markdown table at the end of the report to organize key points in the report, organized and easy to read."""
        )

        prompt = ChatPromptTemplate.from_messages(
            [
                (
                    "system",
                    "You are a helpful AI assistant, collaborating with other assistants."
                    " Use the provided tools to progress towards answering the question."
                    " If you are unable to fully answer, that's OK; another assistant with different tools"
                    " will help where you left off. Execute what you can to make progress."
                    " If you or any other assistant has the FINAL TRANSACTION PROPOSAL: **BUY/HOLD/SELL** or deliverable,"
                    " prefix your response with FINAL TRANSACTION PROPOSAL: **BUY/HOLD/SELL** so the team knows to stop."
                    " You have access to the following tools: {tool_names}.\n{system_message}"
                    "For your reference, the current date is {current_date}. The company we want to look at is {ticker}",
                ),
                MessagesPlaceholder(variable_name="messages"),
            ]
        )

        # Fill in the template variables that are fixed for this invocation.
        prompt = prompt.partial(system_message=system_message)
        prompt = prompt.partial(tool_names=", ".join([tool.name for tool in tools]))
        prompt = prompt.partial(current_date=current_date)
        prompt = prompt.partial(ticker=ticker)

        chain = prompt | llm.bind_tools(tools)

        result = chain.invoke(state["messages"])

        # A response with no tool calls is the model's final answer and
        # becomes the report; otherwise the graph loops to execute tools.
        report = ""

        if len(result.tool_calls) == 0:
            report = result.content

        return {
            "messages": [result],
            "market_report": report,
        }

    return market_analyst_node
|
||||
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
|
||||
import time
|
||||
import json
|
||||
from tradingagents.agents.utils.agent_utils import get_stock_data, get_indicators
|
||||
from tradingagents.dataflows.config import get_config
|
||||
|
||||
|
||||
def create_market_analyst(llm):
    """Create the market (technical) analyst node for the trading graph.

    Args:
        llm: LangChain chat model that supports ``bind_tools``.

    Returns:
        A graph-node callable. It reads ``trade_date`` and
        ``company_of_interest`` from the state, lets the model call the
        market-data tools, and returns the model message together with a
        ``market_report`` string (empty until the model stops calling tools).
    """

    def market_analyst_node(state):
        current_date = state["trade_date"]
        ticker = state["company_of_interest"]
        # NOTE: removed an unused duplicate binding of state["company_of_interest"]
        # (``company_name``) that was never referenced.

        tools = [
            get_stock_data,
            get_indicators,
        ]

        system_message = (
            """You are a trading assistant tasked with analyzing financial markets. Your role is to select the **most relevant indicators** for a given market condition or trading strategy from the following list. The goal is to choose up to **8 indicators** that provide complementary insights without redundancy. Categories and each category's indicators are:

Moving Averages:
- close_50_sma: 50 SMA: A medium-term trend indicator. Usage: Identify trend direction and serve as dynamic support/resistance. Tips: It lags price; combine with faster indicators for timely signals.
- close_200_sma: 200 SMA: A long-term trend benchmark. Usage: Confirm overall market trend and identify golden/death cross setups. Tips: It reacts slowly; best for strategic trend confirmation rather than frequent trading entries.
- close_10_ema: 10 EMA: A responsive short-term average. Usage: Capture quick shifts in momentum and potential entry points. Tips: Prone to noise in choppy markets; use alongside longer averages for filtering false signals.

MACD Related:
- macd: MACD: Computes momentum via differences of EMAs. Usage: Look for crossovers and divergence as signals of trend changes. Tips: Confirm with other indicators in low-volatility or sideways markets.
- macds: MACD Signal: An EMA smoothing of the MACD line. Usage: Use crossovers with the MACD line to trigger trades. Tips: Should be part of a broader strategy to avoid false positives.
- macdh: MACD Histogram: Shows the gap between the MACD line and its signal. Usage: Visualize momentum strength and spot divergence early. Tips: Can be volatile; complement with additional filters in fast-moving markets.

Momentum Indicators:
- rsi: RSI: Measures momentum to flag overbought/oversold conditions. Usage: Apply 70/30 thresholds and watch for divergence to signal reversals. Tips: In strong trends, RSI may remain extreme; always cross-check with trend analysis.

Volatility Indicators:
- boll: Bollinger Middle: A 20 SMA serving as the basis for Bollinger Bands. Usage: Acts as a dynamic benchmark for price movement. Tips: Combine with the upper and lower bands to effectively spot breakouts or reversals.
- boll_ub: Bollinger Upper Band: Typically 2 standard deviations above the middle line. Usage: Signals potential overbought conditions and breakout zones. Tips: Confirm signals with other tools; prices may ride the band in strong trends.
- boll_lb: Bollinger Lower Band: Typically 2 standard deviations below the middle line. Usage: Indicates potential oversold conditions. Tips: Use additional analysis to avoid false reversal signals.
- atr: ATR: Averages true range to measure volatility. Usage: Set stop-loss levels and adjust position sizes based on current market volatility. Tips: It's a reactive measure, so use it as part of a broader risk management strategy.

Volume-Based Indicators:
- vwma: VWMA: A moving average weighted by volume. Usage: Confirm trends by integrating price action with volume data. Tips: Watch for skewed results from volume spikes; use in combination with other volume analyses.

- Select indicators that provide diverse and complementary information. Avoid redundancy (e.g., do not select both rsi and stochrsi). Also briefly explain why they are suitable for the given market context. When you tool call, please use the exact name of the indicators provided above as they are defined parameters, otherwise your call will fail. Please make sure to call get_stock_data first to retrieve the CSV that is needed to generate indicators. Then use get_indicators with the specific indicator names. Write a very detailed and nuanced report of the trends you observe. Do not simply state the trends are mixed, provide detailed and finegrained analysis and insights that may help traders make decisions."""
            + """ Make sure to append a Markdown table at the end of the report to organize key points in the report, organized and easy to read."""
        )

        prompt = ChatPromptTemplate.from_messages(
            [
                (
                    "system",
                    "You are a helpful AI assistant, collaborating with other assistants."
                    " Use the provided tools to progress towards answering the question."
                    " If you are unable to fully answer, that's OK; another assistant with different tools"
                    " will help where you left off. Execute what you can to make progress."
                    " If you or any other assistant has the FINAL TRANSACTION PROPOSAL: **BUY/HOLD/SELL** or deliverable,"
                    " prefix your response with FINAL TRANSACTION PROPOSAL: **BUY/HOLD/SELL** so the team knows to stop."
                    " You have access to the following tools: {tool_names}.\n{system_message}"
                    "For your reference, the current date is {current_date}. The company we want to look at is {ticker}",
                ),
                MessagesPlaceholder(variable_name="messages"),
            ]
        )

        # Fill the template variables that are fixed for this invocation.
        prompt = prompt.partial(system_message=system_message)
        prompt = prompt.partial(tool_names=", ".join([tool.name for tool in tools]))
        prompt = prompt.partial(current_date=current_date)
        prompt = prompt.partial(ticker=ticker)

        chain = prompt | llm.bind_tools(tools)
        result = chain.invoke(state["messages"])

        # Publish the report only once the model has stopped requesting tools;
        # while tool calls are pending the report stays empty.
        report = ""
        if len(result.tool_calls) == 0:
            report = result.content

        return {
            "messages": [result],
            "market_report": report,
        }

    return market_analyst_node
|
||||
|
|
|
|||
|
|
@ -1,58 +1,58 @@
|
|||
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
|
||||
import time
|
||||
import json
|
||||
from tradingagents.agents.utils.agent_utils import get_news, get_global_news
|
||||
from tradingagents.dataflows.config import get_config
|
||||
|
||||
|
||||
def create_news_analyst(llm):
    """Create the news analyst node.

    The returned node asks the model to research recent company and macro
    news via the ``get_news`` / ``get_global_news`` tools and, once no
    further tool calls are requested, stores the answer as ``news_report``.
    """

    def news_analyst_node(state):
        trade_date = state["trade_date"]
        symbol = state["company_of_interest"]

        toolkit = [get_news, get_global_news]

        analyst_brief = (
            "You are a news researcher tasked with analyzing recent news and trends over the past week. Please write a comprehensive report of the current state of the world that is relevant for trading and macroeconomics. Use the available tools: get_news(query, start_date, end_date) for company-specific or targeted news searches, and get_global_news(curr_date, look_back_days, limit) for broader macroeconomic news. Do not simply state the trends are mixed, provide detailed and finegrained analysis and insights that may help traders make decisions."
            + """ Make sure to append a Markdown table at the end of the report to organize key points in the report, organized and easy to read."""
        )

        collaboration_header = (
            "You are a helpful AI assistant, collaborating with other assistants."
            " Use the provided tools to progress towards answering the question."
            " If you are unable to fully answer, that's OK; another assistant with different tools"
            " will help where you left off. Execute what you can to make progress."
            " If you or any other assistant has the FINAL TRANSACTION PROPOSAL: **BUY/HOLD/SELL** or deliverable,"
            " prefix your response with FINAL TRANSACTION PROPOSAL: **BUY/HOLD/SELL** so the team knows to stop."
            " You have access to the following tools: {tool_names}.\n{system_message}"
            "For your reference, the current date is {current_date}. We are looking at the company {ticker}"
        )

        # Bind the per-invocation template variables in one partial call.
        prompt = ChatPromptTemplate.from_messages(
            [
                ("system", collaboration_header),
                MessagesPlaceholder(variable_name="messages"),
            ]
        ).partial(
            system_message=analyst_brief,
            tool_names=", ".join(tool.name for tool in toolkit),
            current_date=trade_date,
            ticker=symbol,
        )

        ai_message = (prompt | llm.bind_tools(toolkit)).invoke(state["messages"])

        # A non-empty report is produced only when the model is done with tools.
        report = ai_message.content if not ai_message.tool_calls else ""

        return {"messages": [ai_message], "news_report": report}

    return news_analyst_node
|
||||
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
|
||||
import time
|
||||
import json
|
||||
from tradingagents.agents.utils.agent_utils import get_news, get_global_news
|
||||
from tradingagents.dataflows.config import get_config
|
||||
|
||||
|
||||
def create_news_analyst(llm):
    """Create the news analyst node.

    The returned node asks the model to research recent company and macro
    news via the ``get_news`` / ``get_global_news`` tools and, once no
    further tool calls are requested, stores the answer as ``news_report``.
    """

    def news_analyst_node(state):
        trade_date = state["trade_date"]
        symbol = state["company_of_interest"]

        toolkit = [get_news, get_global_news]

        analyst_brief = (
            "You are a news researcher tasked with analyzing recent news and trends over the past week. Please write a comprehensive report of the current state of the world that is relevant for trading and macroeconomics. Use the available tools: get_news(query, start_date, end_date) for company-specific or targeted news searches, and get_global_news(curr_date, look_back_days, limit) for broader macroeconomic news. Do not simply state the trends are mixed, provide detailed and finegrained analysis and insights that may help traders make decisions."
            + """ Make sure to append a Markdown table at the end of the report to organize key points in the report, organized and easy to read."""
        )

        collaboration_header = (
            "You are a helpful AI assistant, collaborating with other assistants."
            " Use the provided tools to progress towards answering the question."
            " If you are unable to fully answer, that's OK; another assistant with different tools"
            " will help where you left off. Execute what you can to make progress."
            " If you or any other assistant has the FINAL TRANSACTION PROPOSAL: **BUY/HOLD/SELL** or deliverable,"
            " prefix your response with FINAL TRANSACTION PROPOSAL: **BUY/HOLD/SELL** so the team knows to stop."
            " You have access to the following tools: {tool_names}.\n{system_message}"
            "For your reference, the current date is {current_date}. We are looking at the company {ticker}"
        )

        # Bind the per-invocation template variables in one partial call.
        prompt = ChatPromptTemplate.from_messages(
            [
                ("system", collaboration_header),
                MessagesPlaceholder(variable_name="messages"),
            ]
        ).partial(
            system_message=analyst_brief,
            tool_names=", ".join(tool.name for tool in toolkit),
            current_date=trade_date,
            ticker=symbol,
        )

        ai_message = (prompt | llm.bind_tools(toolkit)).invoke(state["messages"])

        # A non-empty report is produced only when the model is done with tools.
        report = ai_message.content if not ai_message.tool_calls else ""

        return {"messages": [ai_message], "news_report": report}

    return news_analyst_node
|
||||
|
|
|
|||
|
|
@ -1,59 +1,59 @@
|
|||
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
|
||||
import time
|
||||
import json
|
||||
from tradingagents.agents.utils.agent_utils import get_news
|
||||
from tradingagents.dataflows.config import get_config
|
||||
|
||||
|
||||
def create_social_media_analyst(llm):
    """Create the social-media / sentiment analyst node.

    Args:
        llm: LangChain chat model that supports ``bind_tools``.

    Returns:
        A graph-node callable that researches social-media chatter, sentiment,
        and company news for ``company_of_interest`` and publishes the answer
        as ``sentiment_report`` once the model stops requesting tool calls.
    """

    def social_media_analyst_node(state):
        current_date = state["trade_date"]
        ticker = state["company_of_interest"]
        # NOTE: removed an unused duplicate binding of
        # state["company_of_interest"] (``company_name``).

        tools = [
            get_news,
        ]

        # BUG FIX: a stray trailing comma after the string concatenation made
        # ``system_message`` a 1-tuple, so the prompt rendered the tuple's
        # repr instead of the instruction text. The comma is removed so this
        # is a plain string.
        system_message = (
            "You are a social media and company specific news researcher/analyst tasked with analyzing social media posts, recent company news, and public sentiment for a specific company over the past week. You will be given a company's name your objective is to write a comprehensive long report detailing your analysis, insights, and implications for traders and investors on this company's current state after looking at social media and what people are saying about that company, analyzing sentiment data of what people feel each day about the company, and looking at recent company news. Use the get_news(query, start_date, end_date) tool to search for company-specific news and social media discussions. Try to look at all sources possible from social media to sentiment to news. Do not simply state the trends are mixed, provide detailed and finegrained analysis and insights that may help traders make decisions."
            + """ Make sure to append a Markdown table at the end of the report to organize key points in the report, organized and easy to read."""
        )

        prompt = ChatPromptTemplate.from_messages(
            [
                (
                    "system",
                    "You are a helpful AI assistant, collaborating with other assistants."
                    " Use the provided tools to progress towards answering the question."
                    " If you are unable to fully answer, that's OK; another assistant with different tools"
                    " will help where you left off. Execute what you can to make progress."
                    " If you or any other assistant has the FINAL TRANSACTION PROPOSAL: **BUY/HOLD/SELL** or deliverable,"
                    " prefix your response with FINAL TRANSACTION PROPOSAL: **BUY/HOLD/SELL** so the team knows to stop."
                    " You have access to the following tools: {tool_names}.\n{system_message}"
                    "For your reference, the current date is {current_date}. The current company we want to analyze is {ticker}",
                ),
                MessagesPlaceholder(variable_name="messages"),
            ]
        )

        # Fill template variables that are fixed for this invocation.
        prompt = prompt.partial(system_message=system_message)
        prompt = prompt.partial(tool_names=", ".join([tool.name for tool in tools]))
        prompt = prompt.partial(current_date=current_date)
        prompt = prompt.partial(ticker=ticker)

        chain = prompt | llm.bind_tools(tools)

        result = chain.invoke(state["messages"])

        # Publish the report only after the model stops calling tools.
        report = ""
        if len(result.tool_calls) == 0:
            report = result.content

        return {
            "messages": [result],
            "sentiment_report": report,
        }

    return social_media_analyst_node
|
||||
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
|
||||
import time
|
||||
import json
|
||||
from tradingagents.agents.utils.agent_utils import get_news
|
||||
from tradingagents.dataflows.config import get_config
|
||||
|
||||
|
||||
def create_social_media_analyst(llm):
    """Create the social-media / sentiment analyst node.

    Args:
        llm: LangChain chat model that supports ``bind_tools``.

    Returns:
        A graph-node callable that researches social-media chatter, sentiment,
        and company news for ``company_of_interest`` and publishes the answer
        as ``sentiment_report`` once the model stops requesting tool calls.
    """

    def social_media_analyst_node(state):
        current_date = state["trade_date"]
        ticker = state["company_of_interest"]
        # NOTE: removed an unused duplicate binding of
        # state["company_of_interest"] (``company_name``).

        tools = [
            get_news,
        ]

        # BUG FIX: a stray trailing comma after the string concatenation made
        # ``system_message`` a 1-tuple, so the prompt rendered the tuple's
        # repr instead of the instruction text. The comma is removed so this
        # is a plain string.
        system_message = (
            "You are a social media and company specific news researcher/analyst tasked with analyzing social media posts, recent company news, and public sentiment for a specific company over the past week. You will be given a company's name your objective is to write a comprehensive long report detailing your analysis, insights, and implications for traders and investors on this company's current state after looking at social media and what people are saying about that company, analyzing sentiment data of what people feel each day about the company, and looking at recent company news. Use the get_news(query, start_date, end_date) tool to search for company-specific news and social media discussions. Try to look at all sources possible from social media to sentiment to news. Do not simply state the trends are mixed, provide detailed and finegrained analysis and insights that may help traders make decisions."
            + """ Make sure to append a Markdown table at the end of the report to organize key points in the report, organized and easy to read."""
        )

        prompt = ChatPromptTemplate.from_messages(
            [
                (
                    "system",
                    "You are a helpful AI assistant, collaborating with other assistants."
                    " Use the provided tools to progress towards answering the question."
                    " If you are unable to fully answer, that's OK; another assistant with different tools"
                    " will help where you left off. Execute what you can to make progress."
                    " If you or any other assistant has the FINAL TRANSACTION PROPOSAL: **BUY/HOLD/SELL** or deliverable,"
                    " prefix your response with FINAL TRANSACTION PROPOSAL: **BUY/HOLD/SELL** so the team knows to stop."
                    " You have access to the following tools: {tool_names}.\n{system_message}"
                    "For your reference, the current date is {current_date}. The current company we want to analyze is {ticker}",
                ),
                MessagesPlaceholder(variable_name="messages"),
            ]
        )

        # Fill template variables that are fixed for this invocation.
        prompt = prompt.partial(system_message=system_message)
        prompt = prompt.partial(tool_names=", ".join([tool.name for tool in tools]))
        prompt = prompt.partial(current_date=current_date)
        prompt = prompt.partial(ticker=ticker)

        chain = prompt | llm.bind_tools(tools)

        result = chain.invoke(state["messages"])

        # Publish the report only after the model stops calling tools.
        report = ""
        if len(result.tool_calls) == 0:
            report = result.content

        return {
            "messages": [result],
            "sentiment_report": report,
        }

    return social_media_analyst_node
|
||||
|
|
|
|||
|
|
@ -1,55 +1,55 @@
|
|||
import time
|
||||
import json
|
||||
|
||||
|
||||
def create_research_manager(llm, memory):
    """Create the research-manager node that judges the bull/bear debate.

    The node summarizes the debate transcript, consults past reflections from
    memory, and emits a decisive investment plan for the trader.
    """

    def research_manager_node(state) -> dict:
        debate_state = state["investment_debate_state"]
        transcript = debate_state.get("history", "")

        market_report = state["market_report"]
        sentiment_report = state["sentiment_report"]
        news_report = state["news_report"]
        fundamentals_report = state["fundamentals_report"]

        # Look up reflections on similar past situations.
        situation = f"{market_report}\n\n{sentiment_report}\n\n{news_report}\n\n{fundamentals_report}"
        reflections = memory.get_memories(situation, n_matches=2)
        past_memory_str = "".join(
            entry["recommendation"] + "\n\n" for entry in reflections
        )

        prompt = f"""As the portfolio manager and debate facilitator, your role is to critically evaluate this round of debate and make a definitive decision: align with the bear analyst, the bull analyst, or choose Hold only if it is strongly justified based on the arguments presented.

Summarize the key points from both sides concisely, focusing on the most compelling evidence or reasoning. Your recommendation—Buy, Sell, or Hold—must be clear and actionable. Avoid defaulting to Hold simply because both sides have valid points; commit to a stance grounded in the debate's strongest arguments.

Additionally, develop a detailed investment plan for the trader. This should include:

Your Recommendation: A decisive stance supported by the most convincing arguments.
Rationale: An explanation of why these arguments lead to your conclusion.
Strategic Actions: Concrete steps for implementing the recommendation.
Take into account your past mistakes on similar situations. Use these insights to refine your decision-making and ensure you are learning and improving. Present your analysis conversationally, as if speaking naturally, without special formatting.

Here are your past reflections on mistakes:
\"{past_memory_str}\"

Here is the debate:
Debate History:
{transcript}"""
        verdict = llm.invoke(prompt).content

        updated_debate_state = {
            "judge_decision": verdict,
            "history": debate_state.get("history", ""),
            "bear_history": debate_state.get("bear_history", ""),
            "bull_history": debate_state.get("bull_history", ""),
            "current_response": verdict,
            "count": debate_state["count"],
        }

        return {
            "investment_debate_state": updated_debate_state,
            "investment_plan": verdict,
        }

    return research_manager_node
|
||||
import time
|
||||
import json
|
||||
|
||||
|
||||
def create_research_manager(llm, memory):
    """Create the research-manager node that judges the bull/bear debate.

    The node summarizes the debate transcript, consults past reflections from
    memory, and emits a decisive investment plan for the trader.
    """

    def research_manager_node(state) -> dict:
        debate_state = state["investment_debate_state"]
        transcript = debate_state.get("history", "")

        market_report = state["market_report"]
        sentiment_report = state["sentiment_report"]
        news_report = state["news_report"]
        fundamentals_report = state["fundamentals_report"]

        # Look up reflections on similar past situations.
        situation = f"{market_report}\n\n{sentiment_report}\n\n{news_report}\n\n{fundamentals_report}"
        reflections = memory.get_memories(situation, n_matches=2)
        past_memory_str = "".join(
            entry["recommendation"] + "\n\n" for entry in reflections
        )

        prompt = f"""As the portfolio manager and debate facilitator, your role is to critically evaluate this round of debate and make a definitive decision: align with the bear analyst, the bull analyst, or choose Hold only if it is strongly justified based on the arguments presented.

Summarize the key points from both sides concisely, focusing on the most compelling evidence or reasoning. Your recommendation—Buy, Sell, or Hold—must be clear and actionable. Avoid defaulting to Hold simply because both sides have valid points; commit to a stance grounded in the debate's strongest arguments.

Additionally, develop a detailed investment plan for the trader. This should include:

Your Recommendation: A decisive stance supported by the most convincing arguments.
Rationale: An explanation of why these arguments lead to your conclusion.
Strategic Actions: Concrete steps for implementing the recommendation.
Take into account your past mistakes on similar situations. Use these insights to refine your decision-making and ensure you are learning and improving. Present your analysis conversationally, as if speaking naturally, without special formatting.

Here are your past reflections on mistakes:
\"{past_memory_str}\"

Here is the debate:
Debate History:
{transcript}"""
        verdict = llm.invoke(prompt).content

        updated_debate_state = {
            "judge_decision": verdict,
            "history": debate_state.get("history", ""),
            "bear_history": debate_state.get("bear_history", ""),
            "bull_history": debate_state.get("bull_history", ""),
            "current_response": verdict,
            "count": debate_state["count"],
        }

        return {
            "investment_debate_state": updated_debate_state,
            "investment_plan": verdict,
        }

    return research_manager_node
|
||||
|
|
|
|||
|
|
@ -1,66 +1,66 @@
|
|||
import time
|
||||
import json
|
||||
|
||||
|
||||
def create_risk_manager(llm, memory):
    """Create the risk-management judge node.

    Args:
        llm: Chat model used to adjudicate the three-analyst risk debate.
        memory: Situation memory exposing ``get_memories(situation, n_matches)``.

    Returns:
        A node callable producing ``final_trade_decision`` and an updated
        ``risk_debate_state`` with the judge's verdict recorded.
    """

    def risk_manager_node(state) -> dict:
        # NOTE: removed an unused ``company_name`` local.
        history = state["risk_debate_state"]["history"]
        risk_debate_state = state["risk_debate_state"]
        market_research_report = state["market_report"]
        news_report = state["news_report"]
        # BUG FIX: this previously read state["news_report"] (copy/paste typo),
        # so the memory lookup duplicated the news report and never saw the
        # fundamentals report.
        fundamentals_report = state["fundamentals_report"]
        sentiment_report = state["sentiment_report"]
        trader_plan = state["investment_plan"]

        curr_situation = f"{market_research_report}\n\n{sentiment_report}\n\n{news_report}\n\n{fundamentals_report}"
        past_memories = memory.get_memories(curr_situation, n_matches=2)

        past_memory_str = ""
        for rec in past_memories:
            past_memory_str += rec["recommendation"] + "\n\n"

        prompt = f"""As the Risk Management Judge and Debate Facilitator, your goal is to evaluate the debate between three risk analysts—Aggressive, Neutral, and Conservative—and determine the best course of action for the trader. Your decision must result in a clear recommendation: Buy, Sell, or Hold. Choose Hold only if strongly justified by specific arguments, not as a fallback when all sides seem valid. Strive for clarity and decisiveness.

Guidelines for Decision-Making:
1. **Summarize Key Arguments**: Extract the strongest points from each analyst, focusing on relevance to the context.
2. **Provide Rationale**: Support your recommendation with direct quotes and counterarguments from the debate.
3. **Refine the Trader's Plan**: Start with the trader's original plan, **{trader_plan}**, and adjust it based on the analysts' insights.
4. **Learn from Past Mistakes**: Use lessons from **{past_memory_str}** to address prior misjudgments and improve the decision you are making now to make sure you don't make a wrong BUY/SELL/HOLD call that loses money.

Deliverables:
- A clear and actionable recommendation: Buy, Sell, or Hold.
- Detailed reasoning anchored in the debate and past reflections.

---

**Analysts Debate History:**
{history}

---

Focus on actionable insights and continuous improvement. Build on past lessons, critically evaluate all perspectives, and ensure each decision advances better outcomes."""

        response = llm.invoke(prompt)

        new_risk_debate_state = {
            "judge_decision": response.content,
            "history": risk_debate_state["history"],
            "aggressive_history": risk_debate_state["aggressive_history"],
            "conservative_history": risk_debate_state["conservative_history"],
            "neutral_history": risk_debate_state["neutral_history"],
            "latest_speaker": "Judge",
            "current_aggressive_response": risk_debate_state["current_aggressive_response"],
            "current_conservative_response": risk_debate_state["current_conservative_response"],
            "current_neutral_response": risk_debate_state["current_neutral_response"],
            "count": risk_debate_state["count"],
        }

        return {
            "risk_debate_state": new_risk_debate_state,
            "final_trade_decision": response.content,
        }

    return risk_manager_node
|
||||
import time
|
||||
import json
|
||||
|
||||
|
||||
def create_risk_manager(llm, memory):
    """Create the risk-management judge node.

    Args:
        llm: Chat model used to adjudicate the three-analyst risk debate.
        memory: Situation memory exposing ``get_memories(situation, n_matches)``.

    Returns:
        A node callable producing ``final_trade_decision`` and an updated
        ``risk_debate_state`` with the judge's verdict recorded.
    """

    def risk_manager_node(state) -> dict:
        # NOTE: removed an unused ``company_name`` local.
        history = state["risk_debate_state"]["history"]
        risk_debate_state = state["risk_debate_state"]
        market_research_report = state["market_report"]
        news_report = state["news_report"]
        # BUG FIX: this previously read state["news_report"] (copy/paste typo),
        # so the memory lookup duplicated the news report and never saw the
        # fundamentals report.
        fundamentals_report = state["fundamentals_report"]
        sentiment_report = state["sentiment_report"]
        trader_plan = state["investment_plan"]

        curr_situation = f"{market_research_report}\n\n{sentiment_report}\n\n{news_report}\n\n{fundamentals_report}"
        past_memories = memory.get_memories(curr_situation, n_matches=2)

        past_memory_str = ""
        for rec in past_memories:
            past_memory_str += rec["recommendation"] + "\n\n"

        prompt = f"""As the Risk Management Judge and Debate Facilitator, your goal is to evaluate the debate between three risk analysts—Aggressive, Neutral, and Conservative—and determine the best course of action for the trader. Your decision must result in a clear recommendation: Buy, Sell, or Hold. Choose Hold only if strongly justified by specific arguments, not as a fallback when all sides seem valid. Strive for clarity and decisiveness.

Guidelines for Decision-Making:
1. **Summarize Key Arguments**: Extract the strongest points from each analyst, focusing on relevance to the context.
2. **Provide Rationale**: Support your recommendation with direct quotes and counterarguments from the debate.
3. **Refine the Trader's Plan**: Start with the trader's original plan, **{trader_plan}**, and adjust it based on the analysts' insights.
4. **Learn from Past Mistakes**: Use lessons from **{past_memory_str}** to address prior misjudgments and improve the decision you are making now to make sure you don't make a wrong BUY/SELL/HOLD call that loses money.

Deliverables:
- A clear and actionable recommendation: Buy, Sell, or Hold.
- Detailed reasoning anchored in the debate and past reflections.

---

**Analysts Debate History:**
{history}

---

Focus on actionable insights and continuous improvement. Build on past lessons, critically evaluate all perspectives, and ensure each decision advances better outcomes."""

        response = llm.invoke(prompt)

        new_risk_debate_state = {
            "judge_decision": response.content,
            "history": risk_debate_state["history"],
            "aggressive_history": risk_debate_state["aggressive_history"],
            "conservative_history": risk_debate_state["conservative_history"],
            "neutral_history": risk_debate_state["neutral_history"],
            "latest_speaker": "Judge",
            "current_aggressive_response": risk_debate_state["current_aggressive_response"],
            "current_conservative_response": risk_debate_state["current_conservative_response"],
            "current_neutral_response": risk_debate_state["current_neutral_response"],
            "count": risk_debate_state["count"],
        }

        return {
            "risk_debate_state": new_risk_debate_state,
            "final_trade_decision": response.content,
        }

    return risk_manager_node
|
||||
|
|
|
|||
|
|
@ -1,61 +1,61 @@
|
|||
from langchain_core.messages import AIMessage
|
||||
import time
|
||||
import json
|
||||
|
||||
|
||||
def create_bear_researcher(llm, memory):
    """Build the bear-side node of the bull/bear investment debate.

    Args:
        llm: Chat model exposing ``invoke(prompt)`` and returning an object
            with a ``.content`` attribute (LangChain-style interface).
        memory: Store exposing ``get_memories(situation, n_matches)`` that
            returns dicts containing a ``"recommendation"`` key.

    Returns:
        ``bear_node(state) -> dict`` — a graph node that generates one bearish
        argument and returns the updated ``investment_debate_state``.
    """

    def bear_node(state) -> dict:
        investment_debate_state = state["investment_debate_state"]
        history = investment_debate_state.get("history", "")
        bear_history = investment_debate_state.get("bear_history", "")

        current_response = investment_debate_state.get("current_response", "")
        market_research_report = state["market_report"]
        sentiment_report = state["sentiment_report"]
        news_report = state["news_report"]
        fundamentals_report = state["fundamentals_report"]

        # Concatenated reports form the retrieval key for similar past episodes.
        curr_situation = f"{market_research_report}\n\n{sentiment_report}\n\n{news_report}\n\n{fundamentals_report}"
        past_memories = memory.get_memories(curr_situation, n_matches=2)

        # str.join instead of += in a loop (linear, not quadratic); the
        # original enumerate() index was never used.
        past_memory_str = "".join(rec["recommendation"] + "\n\n" for rec in past_memories)

        prompt = f"""You are a Bear Analyst making the case against investing in the stock. Your goal is to present a well-reasoned argument emphasizing risks, challenges, and negative indicators. Leverage the provided research and data to highlight potential downsides and counter bullish arguments effectively.

Key points to focus on:

- Risks and Challenges: Highlight factors like market saturation, financial instability, or macroeconomic threats that could hinder the stock's performance.
- Competitive Weaknesses: Emphasize vulnerabilities such as weaker market positioning, declining innovation, or threats from competitors.
- Negative Indicators: Use evidence from financial data, market trends, or recent adverse news to support your position.
- Bull Counterpoints: Critically analyze the bull argument with specific data and sound reasoning, exposing weaknesses or over-optimistic assumptions.
- Engagement: Present your argument in a conversational style, directly engaging with the bull analyst's points and debating effectively rather than simply listing facts.

Resources available:

Market research report: {market_research_report}
Social media sentiment report: {sentiment_report}
Latest world affairs news: {news_report}
Company fundamentals report: {fundamentals_report}
Conversation history of the debate: {history}
Last bull argument: {current_response}
Reflections from similar situations and lessons learned: {past_memory_str}
Use this information to deliver a compelling bear argument, refute the bull's claims, and engage in a dynamic debate that demonstrates the risks and weaknesses of investing in the stock. You must also address reflections and learn from lessons and mistakes you made in the past.
"""

        response = llm.invoke(prompt)

        argument = f"Bear Analyst: {response.content}"

        # Rebuild the full debate state: this node extends history/bear_history,
        # while the bull side's transcript passes through untouched.
        new_investment_debate_state = {
            "history": history + "\n" + argument,
            "bear_history": bear_history + "\n" + argument,
            "bull_history": investment_debate_state.get("bull_history", ""),
            "current_response": argument,
            "count": investment_debate_state["count"] + 1,
        }

        return {"investment_debate_state": new_investment_debate_state}

    return bear_node
|
||||
from langchain_core.messages import AIMessage
|
||||
import time
|
||||
import json
|
||||
|
||||
|
||||
def create_bear_researcher(llm, memory):
    """Factory for the bear-side node of the investment debate."""

    def bear_node(state) -> dict:
        """Produce one bearish rebuttal and fold it into the debate state."""
        investment_debate_state = state["investment_debate_state"]
        history = investment_debate_state.get("history", "")
        bear_history = investment_debate_state.get("bear_history", "")
        current_response = investment_debate_state.get("current_response", "")

        market_research_report = state["market_report"]
        sentiment_report = state["sentiment_report"]
        news_report = state["news_report"]
        fundamentals_report = state["fundamentals_report"]

        # Look up lessons from comparable past situations.
        curr_situation = "\n\n".join(
            (market_research_report, sentiment_report, news_report, fundamentals_report)
        )
        past_memory_str = ""
        for past in memory.get_memories(curr_situation, n_matches=2):
            past_memory_str += past["recommendation"] + "\n\n"

        prompt = f"""You are a Bear Analyst making the case against investing in the stock. Your goal is to present a well-reasoned argument emphasizing risks, challenges, and negative indicators. Leverage the provided research and data to highlight potential downsides and counter bullish arguments effectively.

Key points to focus on:

- Risks and Challenges: Highlight factors like market saturation, financial instability, or macroeconomic threats that could hinder the stock's performance.
- Competitive Weaknesses: Emphasize vulnerabilities such as weaker market positioning, declining innovation, or threats from competitors.
- Negative Indicators: Use evidence from financial data, market trends, or recent adverse news to support your position.
- Bull Counterpoints: Critically analyze the bull argument with specific data and sound reasoning, exposing weaknesses or over-optimistic assumptions.
- Engagement: Present your argument in a conversational style, directly engaging with the bull analyst's points and debating effectively rather than simply listing facts.

Resources available:

Market research report: {market_research_report}
Social media sentiment report: {sentiment_report}
Latest world affairs news: {news_report}
Company fundamentals report: {fundamentals_report}
Conversation history of the debate: {history}
Last bull argument: {current_response}
Reflections from similar situations and lessons learned: {past_memory_str}
Use this information to deliver a compelling bear argument, refute the bull's claims, and engage in a dynamic debate that demonstrates the risks and weaknesses of investing in the stock. You must also address reflections and learn from lessons and mistakes you made in the past.
"""

        argument = f"Bear Analyst: {llm.invoke(prompt).content}"

        # Extend this side's transcripts; bull's history passes through as-is.
        updated_state = {
            "history": f"{history}\n{argument}",
            "bear_history": f"{bear_history}\n{argument}",
            "bull_history": investment_debate_state.get("bull_history", ""),
            "current_response": argument,
            "count": investment_debate_state["count"] + 1,
        }
        return {"investment_debate_state": updated_state}

    return bear_node
|
||||
|
|
|
|||
|
|
@ -1,59 +1,59 @@
|
|||
from langchain_core.messages import AIMessage
|
||||
import time
|
||||
import json
|
||||
|
||||
|
||||
def create_bull_researcher(llm, memory):
    """Build the bull-side node of the bull/bear investment debate.

    Args:
        llm: Chat model exposing ``invoke(prompt)`` and returning an object
            with a ``.content`` attribute (LangChain-style interface).
        memory: Store exposing ``get_memories(situation, n_matches)`` that
            returns dicts containing a ``"recommendation"`` key.

    Returns:
        ``bull_node(state) -> dict`` — a graph node that generates one bullish
        argument and returns the updated ``investment_debate_state``.
    """

    def bull_node(state) -> dict:
        investment_debate_state = state["investment_debate_state"]
        history = investment_debate_state.get("history", "")
        bull_history = investment_debate_state.get("bull_history", "")

        current_response = investment_debate_state.get("current_response", "")
        market_research_report = state["market_report"]
        sentiment_report = state["sentiment_report"]
        news_report = state["news_report"]
        fundamentals_report = state["fundamentals_report"]

        # Concatenated reports form the retrieval key for similar past episodes.
        curr_situation = f"{market_research_report}\n\n{sentiment_report}\n\n{news_report}\n\n{fundamentals_report}"
        past_memories = memory.get_memories(curr_situation, n_matches=2)

        # str.join instead of += in a loop (linear, not quadratic); the
        # original enumerate() index was never used.
        past_memory_str = "".join(rec["recommendation"] + "\n\n" for rec in past_memories)

        prompt = f"""You are a Bull Analyst advocating for investing in the stock. Your task is to build a strong, evidence-based case emphasizing growth potential, competitive advantages, and positive market indicators. Leverage the provided research and data to address concerns and counter bearish arguments effectively.

Key points to focus on:
- Growth Potential: Highlight the company's market opportunities, revenue projections, and scalability.
- Competitive Advantages: Emphasize factors like unique products, strong branding, or dominant market positioning.
- Positive Indicators: Use financial health, industry trends, and recent positive news as evidence.
- Bear Counterpoints: Critically analyze the bear argument with specific data and sound reasoning, addressing concerns thoroughly and showing why the bull perspective holds stronger merit.
- Engagement: Present your argument in a conversational style, engaging directly with the bear analyst's points and debating effectively rather than just listing data.

Resources available:
Market research report: {market_research_report}
Social media sentiment report: {sentiment_report}
Latest world affairs news: {news_report}
Company fundamentals report: {fundamentals_report}
Conversation history of the debate: {history}
Last bear argument: {current_response}
Reflections from similar situations and lessons learned: {past_memory_str}
Use this information to deliver a compelling bull argument, refute the bear's concerns, and engage in a dynamic debate that demonstrates the strengths of the bull position. You must also address reflections and learn from lessons and mistakes you made in the past.
"""

        response = llm.invoke(prompt)

        argument = f"Bull Analyst: {response.content}"

        # Rebuild the full debate state: this node extends history/bull_history,
        # while the bear side's transcript passes through untouched.
        new_investment_debate_state = {
            "history": history + "\n" + argument,
            "bull_history": bull_history + "\n" + argument,
            "bear_history": investment_debate_state.get("bear_history", ""),
            "current_response": argument,
            "count": investment_debate_state["count"] + 1,
        }

        return {"investment_debate_state": new_investment_debate_state}

    return bull_node
|
||||
from langchain_core.messages import AIMessage
|
||||
import time
|
||||
import json
|
||||
|
||||
|
||||
def create_bull_researcher(llm, memory):
    """Factory for the bull-side node of the investment debate."""

    def bull_node(state) -> dict:
        """Produce one bullish argument and fold it into the debate state."""
        investment_debate_state = state["investment_debate_state"]
        history = investment_debate_state.get("history", "")
        bull_history = investment_debate_state.get("bull_history", "")
        current_response = investment_debate_state.get("current_response", "")

        market_research_report = state["market_report"]
        sentiment_report = state["sentiment_report"]
        news_report = state["news_report"]
        fundamentals_report = state["fundamentals_report"]

        # Look up lessons from comparable past situations.
        curr_situation = "\n\n".join(
            (market_research_report, sentiment_report, news_report, fundamentals_report)
        )
        past_memory_str = ""
        for past in memory.get_memories(curr_situation, n_matches=2):
            past_memory_str += past["recommendation"] + "\n\n"

        prompt = f"""You are a Bull Analyst advocating for investing in the stock. Your task is to build a strong, evidence-based case emphasizing growth potential, competitive advantages, and positive market indicators. Leverage the provided research and data to address concerns and counter bearish arguments effectively.

Key points to focus on:
- Growth Potential: Highlight the company's market opportunities, revenue projections, and scalability.
- Competitive Advantages: Emphasize factors like unique products, strong branding, or dominant market positioning.
- Positive Indicators: Use financial health, industry trends, and recent positive news as evidence.
- Bear Counterpoints: Critically analyze the bear argument with specific data and sound reasoning, addressing concerns thoroughly and showing why the bull perspective holds stronger merit.
- Engagement: Present your argument in a conversational style, engaging directly with the bear analyst's points and debating effectively rather than just listing data.

Resources available:
Market research report: {market_research_report}
Social media sentiment report: {sentiment_report}
Latest world affairs news: {news_report}
Company fundamentals report: {fundamentals_report}
Conversation history of the debate: {history}
Last bear argument: {current_response}
Reflections from similar situations and lessons learned: {past_memory_str}
Use this information to deliver a compelling bull argument, refute the bear's concerns, and engage in a dynamic debate that demonstrates the strengths of the bull position. You must also address reflections and learn from lessons and mistakes you made in the past.
"""

        argument = f"Bull Analyst: {llm.invoke(prompt).content}"

        # Extend this side's transcripts; bear's history passes through as-is.
        updated_state = {
            "history": f"{history}\n{argument}",
            "bull_history": f"{bull_history}\n{argument}",
            "bear_history": investment_debate_state.get("bear_history", ""),
            "current_response": argument,
            "count": investment_debate_state["count"] + 1,
        }
        return {"investment_debate_state": updated_state}

    return bull_node
|
||||
|
|
|
|||
|
|
@ -1,55 +1,55 @@
|
|||
import time
|
||||
import json
|
||||
|
||||
|
||||
def create_aggressive_debator(llm):
    """Build the aggressive (high-risk, high-reward) node of the risk debate.

    Args:
        llm: Chat model exposing ``invoke(prompt)`` and returning an object
            with a ``.content`` attribute (LangChain-style interface).

    Returns:
        ``aggressive_node(state) -> dict`` — a graph node that appends one
        aggressive argument to ``state["risk_debate_state"]``.
    """

    def aggressive_node(state) -> dict:
        risk_debate_state = state["risk_debate_state"]
        history = risk_debate_state.get("history", "")
        aggressive_history = risk_debate_state.get("aggressive_history", "")

        # Latest counterarguments from the other two risk stances (may be
        # empty early in the debate — the prompt tells the model not to
        # hallucinate missing responses).
        current_conservative_response = risk_debate_state.get("current_conservative_response", "")
        current_neutral_response = risk_debate_state.get("current_neutral_response", "")

        market_research_report = state["market_report"]
        sentiment_report = state["sentiment_report"]
        news_report = state["news_report"]
        fundamentals_report = state["fundamentals_report"]

        trader_decision = state["trader_investment_plan"]

        prompt = f"""As the Aggressive Risk Analyst, your role is to actively champion high-reward, high-risk opportunities, emphasizing bold strategies and competitive advantages. When evaluating the trader's decision or plan, focus intently on the potential upside, growth potential, and innovative benefits—even when these come with elevated risk. Use the provided market data and sentiment analysis to strengthen your arguments and challenge the opposing views. Specifically, respond directly to each point made by the conservative and neutral analysts, countering with data-driven rebuttals and persuasive reasoning. Highlight where their caution might miss critical opportunities or where their assumptions may be overly conservative. Here is the trader's decision:

{trader_decision}

Your task is to create a compelling case for the trader's decision by questioning and critiquing the conservative and neutral stances to demonstrate why your high-reward perspective offers the best path forward. Incorporate insights from the following sources into your arguments:

Market Research Report: {market_research_report}
Social Media Sentiment Report: {sentiment_report}
Latest World Affairs Report: {news_report}
Company Fundamentals Report: {fundamentals_report}
Here is the current conversation history: {history} Here are the last arguments from the conservative analyst: {current_conservative_response} Here are the last arguments from the neutral analyst: {current_neutral_response}. If there are no responses from the other viewpoints, do not hallucinate and just present your point.

Engage actively by addressing any specific concerns raised, refuting the weaknesses in their logic, and asserting the benefits of risk-taking to outpace market norms. Maintain a focus on debating and persuading, not just presenting data. Challenge each counterpoint to underscore why a high-risk approach is optimal. Output conversationally as if you are speaking without any special formatting."""

        response = llm.invoke(prompt)

        argument = f"Aggressive Analyst: {response.content}"

        # Rebuild the risk-debate state: extend the shared and aggressive
        # transcripts, record this node as the latest speaker, and pass the
        # other stances' fields through unchanged.
        new_risk_debate_state = {
            "history": history + "\n" + argument,
            "aggressive_history": aggressive_history + "\n" + argument,
            "conservative_history": risk_debate_state.get("conservative_history", ""),
            "neutral_history": risk_debate_state.get("neutral_history", ""),
            "latest_speaker": "Aggressive",
            "current_aggressive_response": argument,
            "current_conservative_response": risk_debate_state.get("current_conservative_response", ""),
            "current_neutral_response": risk_debate_state.get("current_neutral_response", ""),
            "count": risk_debate_state["count"] + 1,
        }

        return {"risk_debate_state": new_risk_debate_state}

    return aggressive_node
|
||||
import time
|
||||
import json
|
||||
|
||||
|
||||
def create_aggressive_debator(llm):
    """Factory for the aggressive stance in the three-way risk debate."""

    def aggressive_node(state) -> dict:
        """Emit one aggressive argument and update the risk-debate state."""
        risk_debate_state = state["risk_debate_state"]
        history = risk_debate_state.get("history", "")
        aggressive_history = risk_debate_state.get("aggressive_history", "")
        current_conservative_response = risk_debate_state.get("current_conservative_response", "")
        current_neutral_response = risk_debate_state.get("current_neutral_response", "")

        market_research_report = state["market_report"]
        sentiment_report = state["sentiment_report"]
        news_report = state["news_report"]
        fundamentals_report = state["fundamentals_report"]
        trader_decision = state["trader_investment_plan"]

        prompt = f"""As the Aggressive Risk Analyst, your role is to actively champion high-reward, high-risk opportunities, emphasizing bold strategies and competitive advantages. When evaluating the trader's decision or plan, focus intently on the potential upside, growth potential, and innovative benefits—even when these come with elevated risk. Use the provided market data and sentiment analysis to strengthen your arguments and challenge the opposing views. Specifically, respond directly to each point made by the conservative and neutral analysts, countering with data-driven rebuttals and persuasive reasoning. Highlight where their caution might miss critical opportunities or where their assumptions may be overly conservative. Here is the trader's decision:

{trader_decision}

Your task is to create a compelling case for the trader's decision by questioning and critiquing the conservative and neutral stances to demonstrate why your high-reward perspective offers the best path forward. Incorporate insights from the following sources into your arguments:

Market Research Report: {market_research_report}
Social Media Sentiment Report: {sentiment_report}
Latest World Affairs Report: {news_report}
Company Fundamentals Report: {fundamentals_report}
Here is the current conversation history: {history} Here are the last arguments from the conservative analyst: {current_conservative_response} Here are the last arguments from the neutral analyst: {current_neutral_response}. If there are no responses from the other viewpoints, do not hallucinate and just present your point.

Engage actively by addressing any specific concerns raised, refuting the weaknesses in their logic, and asserting the benefits of risk-taking to outpace market norms. Maintain a focus on debating and persuading, not just presenting data. Challenge each counterpoint to underscore why a high-risk approach is optimal. Output conversationally as if you are speaking without any special formatting."""

        argument = f"Aggressive Analyst: {llm.invoke(prompt).content}"

        # This stance's transcripts grow; everything else passes through.
        updated_state = {
            "history": f"{history}\n{argument}",
            "aggressive_history": f"{aggressive_history}\n{argument}",
            "conservative_history": risk_debate_state.get("conservative_history", ""),
            "neutral_history": risk_debate_state.get("neutral_history", ""),
            "latest_speaker": "Aggressive",
            "current_aggressive_response": argument,
            "current_conservative_response": current_conservative_response,
            "current_neutral_response": current_neutral_response,
            "count": risk_debate_state["count"] + 1,
        }
        return {"risk_debate_state": updated_state}

    return aggressive_node
|
||||
|
|
|
|||
|
|
@ -1,58 +1,58 @@
|
|||
from langchain_core.messages import AIMessage
|
||||
import time
|
||||
import json
|
||||
|
||||
|
||||
def create_conservative_debator(llm):
    """Build the conservative (capital-preservation) node of the risk debate.

    Args:
        llm: Chat model exposing ``invoke(prompt)`` and returning an object
            with a ``.content`` attribute (LangChain-style interface).

    Returns:
        ``conservative_node(state) -> dict`` — a graph node that appends one
        conservative argument to ``state["risk_debate_state"]``.
    """

    def conservative_node(state) -> dict:
        risk_debate_state = state["risk_debate_state"]
        history = risk_debate_state.get("history", "")
        conservative_history = risk_debate_state.get("conservative_history", "")

        # Latest counterarguments from the other two risk stances (may be
        # empty early in the debate — the prompt tells the model not to
        # hallucinate missing responses).
        current_aggressive_response = risk_debate_state.get("current_aggressive_response", "")
        current_neutral_response = risk_debate_state.get("current_neutral_response", "")

        market_research_report = state["market_report"]
        sentiment_report = state["sentiment_report"]
        news_report = state["news_report"]
        fundamentals_report = state["fundamentals_report"]

        trader_decision = state["trader_investment_plan"]

        prompt = f"""As the Conservative Risk Analyst, your primary objective is to protect assets, minimize volatility, and ensure steady, reliable growth. You prioritize stability, security, and risk mitigation, carefully assessing potential losses, economic downturns, and market volatility. When evaluating the trader's decision or plan, critically examine high-risk elements, pointing out where the decision may expose the firm to undue risk and where more cautious alternatives could secure long-term gains. Here is the trader's decision:

{trader_decision}

Your task is to actively counter the arguments of the Aggressive and Neutral Analysts, highlighting where their views may overlook potential threats or fail to prioritize sustainability. Respond directly to their points, drawing from the following data sources to build a convincing case for a low-risk approach adjustment to the trader's decision:

Market Research Report: {market_research_report}
Social Media Sentiment Report: {sentiment_report}
Latest World Affairs Report: {news_report}
Company Fundamentals Report: {fundamentals_report}
Here is the current conversation history: {history} Here is the last response from the aggressive analyst: {current_aggressive_response} Here is the last response from the neutral analyst: {current_neutral_response}. If there are no responses from the other viewpoints, do not hallucinate and just present your point.

Engage by questioning their optimism and emphasizing the potential downsides they may have overlooked. Address each of their counterpoints to showcase why a conservative stance is ultimately the safest path for the firm's assets. Focus on debating and critiquing their arguments to demonstrate the strength of a low-risk strategy over their approaches. Output conversationally as if you are speaking without any special formatting."""

        response = llm.invoke(prompt)

        argument = f"Conservative Analyst: {response.content}"

        # Rebuild the risk-debate state: extend the shared and conservative
        # transcripts, record this node as the latest speaker, and pass the
        # other stances' fields through unchanged.
        new_risk_debate_state = {
            "history": history + "\n" + argument,
            "aggressive_history": risk_debate_state.get("aggressive_history", ""),
            "conservative_history": conservative_history + "\n" + argument,
            "neutral_history": risk_debate_state.get("neutral_history", ""),
            "latest_speaker": "Conservative",
            "current_aggressive_response": risk_debate_state.get("current_aggressive_response", ""),
            "current_conservative_response": argument,
            "current_neutral_response": risk_debate_state.get("current_neutral_response", ""),
            "count": risk_debate_state["count"] + 1,
        }

        return {"risk_debate_state": new_risk_debate_state}

    return conservative_node
|
||||
from langchain_core.messages import AIMessage
|
||||
import time
|
||||
import json
|
||||
|
||||
|
||||
def create_conservative_debator(llm):
    """Factory for the conservative stance in the three-way risk debate."""

    def conservative_node(state) -> dict:
        """Emit one conservative argument and update the risk-debate state."""
        risk_debate_state = state["risk_debate_state"]
        history = risk_debate_state.get("history", "")
        conservative_history = risk_debate_state.get("conservative_history", "")
        current_aggressive_response = risk_debate_state.get("current_aggressive_response", "")
        current_neutral_response = risk_debate_state.get("current_neutral_response", "")

        market_research_report = state["market_report"]
        sentiment_report = state["sentiment_report"]
        news_report = state["news_report"]
        fundamentals_report = state["fundamentals_report"]
        trader_decision = state["trader_investment_plan"]

        prompt = f"""As the Conservative Risk Analyst, your primary objective is to protect assets, minimize volatility, and ensure steady, reliable growth. You prioritize stability, security, and risk mitigation, carefully assessing potential losses, economic downturns, and market volatility. When evaluating the trader's decision or plan, critically examine high-risk elements, pointing out where the decision may expose the firm to undue risk and where more cautious alternatives could secure long-term gains. Here is the trader's decision:

{trader_decision}

Your task is to actively counter the arguments of the Aggressive and Neutral Analysts, highlighting where their views may overlook potential threats or fail to prioritize sustainability. Respond directly to their points, drawing from the following data sources to build a convincing case for a low-risk approach adjustment to the trader's decision:

Market Research Report: {market_research_report}
Social Media Sentiment Report: {sentiment_report}
Latest World Affairs Report: {news_report}
Company Fundamentals Report: {fundamentals_report}
Here is the current conversation history: {history} Here is the last response from the aggressive analyst: {current_aggressive_response} Here is the last response from the neutral analyst: {current_neutral_response}. If there are no responses from the other viewpoints, do not hallucinate and just present your point.

Engage by questioning their optimism and emphasizing the potential downsides they may have overlooked. Address each of their counterpoints to showcase why a conservative stance is ultimately the safest path for the firm's assets. Focus on debating and critiquing their arguments to demonstrate the strength of a low-risk strategy over their approaches. Output conversationally as if you are speaking without any special formatting."""

        argument = f"Conservative Analyst: {llm.invoke(prompt).content}"

        # This stance's transcripts grow; everything else passes through.
        updated_state = {
            "history": f"{history}\n{argument}",
            "aggressive_history": risk_debate_state.get("aggressive_history", ""),
            "conservative_history": f"{conservative_history}\n{argument}",
            "neutral_history": risk_debate_state.get("neutral_history", ""),
            "latest_speaker": "Conservative",
            "current_aggressive_response": current_aggressive_response,
            "current_conservative_response": argument,
            "current_neutral_response": current_neutral_response,
            "count": risk_debate_state["count"] + 1,
        }
        return {"risk_debate_state": updated_state}

    return conservative_node
|
||||
|
|
|
|||
|
|
@ -1,55 +1,55 @@
|
|||
import time
|
||||
import json
|
||||
|
||||
|
||||
def create_neutral_debator(llm):
    """Build the neutral (balanced-risk) node of the risk debate.

    Args:
        llm: Chat model exposing ``invoke(prompt)`` and returning an object
            with a ``.content`` attribute (LangChain-style interface).

    Returns:
        ``neutral_node(state) -> dict`` — a graph node that appends one
        balanced argument to ``state["risk_debate_state"]``.
    """

    def neutral_node(state) -> dict:
        risk_debate_state = state["risk_debate_state"]
        history = risk_debate_state.get("history", "")
        neutral_history = risk_debate_state.get("neutral_history", "")

        # Latest counterarguments from the other two risk stances (may be
        # empty early in the debate — the prompt tells the model not to
        # hallucinate missing responses).
        current_aggressive_response = risk_debate_state.get("current_aggressive_response", "")
        current_conservative_response = risk_debate_state.get("current_conservative_response", "")

        market_research_report = state["market_report"]
        sentiment_report = state["sentiment_report"]
        news_report = state["news_report"]
        fundamentals_report = state["fundamentals_report"]

        trader_decision = state["trader_investment_plan"]

        prompt = f"""As the Neutral Risk Analyst, your role is to provide a balanced perspective, weighing both the potential benefits and risks of the trader's decision or plan. You prioritize a well-rounded approach, evaluating the upsides and downsides while factoring in broader market trends, potential economic shifts, and diversification strategies.Here is the trader's decision:

{trader_decision}

Your task is to challenge both the Aggressive and Conservative Analysts, pointing out where each perspective may be overly optimistic or overly cautious. Use insights from the following data sources to support a moderate, sustainable strategy to adjust the trader's decision:

Market Research Report: {market_research_report}
Social Media Sentiment Report: {sentiment_report}
Latest World Affairs Report: {news_report}
Company Fundamentals Report: {fundamentals_report}
Here is the current conversation history: {history} Here is the last response from the aggressive analyst: {current_aggressive_response} Here is the last response from the conservative analyst: {current_conservative_response}. If there are no responses from the other viewpoints, do not hallucinate and just present your point.

Engage actively by analyzing both sides critically, addressing weaknesses in the aggressive and conservative arguments to advocate for a more balanced approach. Challenge each of their points to illustrate why a moderate risk strategy might offer the best of both worlds, providing growth potential while safeguarding against extreme volatility. Focus on debating rather than simply presenting data, aiming to show that a balanced view can lead to the most reliable outcomes. Output conversationally as if you are speaking without any special formatting."""

        response = llm.invoke(prompt)

        argument = f"Neutral Analyst: {response.content}"

        # Rebuild the risk-debate state: extend the shared and neutral
        # transcripts, record this node as the latest speaker, and pass the
        # other stances' fields through unchanged.
        new_risk_debate_state = {
            "history": history + "\n" + argument,
            "aggressive_history": risk_debate_state.get("aggressive_history", ""),
            "conservative_history": risk_debate_state.get("conservative_history", ""),
            "neutral_history": neutral_history + "\n" + argument,
            "latest_speaker": "Neutral",
            "current_aggressive_response": risk_debate_state.get("current_aggressive_response", ""),
            "current_conservative_response": risk_debate_state.get("current_conservative_response", ""),
            "current_neutral_response": argument,
            "count": risk_debate_state["count"] + 1,
        }

        return {"risk_debate_state": new_risk_debate_state}

    return neutral_node
|
||||
import time
|
||||
import json
|
||||
|
||||
|
||||
def create_neutral_debator(llm):
    """Create the Neutral Risk Analyst node for the risk debate.

    The returned node reads the shared ``risk_debate_state``, prompts ``llm``
    for a balanced rebuttal to the aggressive and conservative analysts, and
    returns an updated ``risk_debate_state`` with the new argument appended to
    both the overall and the neutral-only history.

    Args:
        llm: Chat model exposing ``invoke(prompt)`` and returning an object
            with a ``.content`` attribute (LangChain-style interface).

    Returns:
        A callable ``neutral_node(state) -> dict`` suitable for graph wiring.
    """

    def neutral_node(state) -> dict:
        risk_debate_state = state["risk_debate_state"]
        history = risk_debate_state.get("history", "")
        neutral_history = risk_debate_state.get("neutral_history", "")

        current_aggressive_response = risk_debate_state.get("current_aggressive_response", "")
        current_conservative_response = risk_debate_state.get("current_conservative_response", "")

        market_research_report = state["market_report"]
        sentiment_report = state["sentiment_report"]
        news_report = state["news_report"]
        fundamentals_report = state["fundamentals_report"]

        trader_decision = state["trader_investment_plan"]

        prompt = f"""As the Neutral Risk Analyst, your role is to provide a balanced perspective, weighing both the potential benefits and risks of the trader's decision or plan. You prioritize a well-rounded approach, evaluating the upsides and downsides while factoring in broader market trends, potential economic shifts, and diversification strategies.Here is the trader's decision:

{trader_decision}

Your task is to challenge both the Aggressive and Conservative Analysts, pointing out where each perspective may be overly optimistic or overly cautious. Use insights from the following data sources to support a moderate, sustainable strategy to adjust the trader's decision:

Market Research Report: {market_research_report}
Social Media Sentiment Report: {sentiment_report}
Latest World Affairs Report: {news_report}
Company Fundamentals Report: {fundamentals_report}
Here is the current conversation history: {history} Here is the last response from the aggressive analyst: {current_aggressive_response} Here is the last response from the conservative analyst: {current_conservative_response}. If there are no responses from the other viewpoints, do not hallucinate and just present your point.

Engage actively by analyzing both sides critically, addressing weaknesses in the aggressive and conservative arguments to advocate for a more balanced approach. Challenge each of their points to illustrate why a moderate risk strategy might offer the best of both worlds, providing growth potential while safeguarding against extreme volatility. Focus on debating rather than simply presenting data, aiming to show that a balanced view can lead to the most reliable outcomes. Output conversationally as if you are speaking without any special formatting."""

        response = llm.invoke(prompt)

        argument = f"Neutral Analyst: {response.content}"

        new_risk_debate_state = {
            "history": history + "\n" + argument,
            "aggressive_history": risk_debate_state.get("aggressive_history", ""),
            "conservative_history": risk_debate_state.get("conservative_history", ""),
            "neutral_history": neutral_history + "\n" + argument,
            "latest_speaker": "Neutral",
            # Reuse the locals fetched above instead of querying the dict again.
            "current_aggressive_response": current_aggressive_response,
            "current_conservative_response": current_conservative_response,
            "current_neutral_response": argument,
            # .get() keeps the node robust if "count" was never initialised,
            # matching the .get() style used for every other key in this dict.
            "count": risk_debate_state.get("count", 0) + 1,
        }

        return {"risk_debate_state": new_risk_debate_state}

    return neutral_node
|
||||
|
|
|
|||
|
|
@ -1,53 +1,53 @@
|
|||
"""Structured output agents for the equity ranking engine."""
|
||||
|
||||
from .tier1 import (
|
||||
create_validation_node,
|
||||
create_macro_node,
|
||||
create_liquidity_node,
|
||||
)
|
||||
from .tier2 import (
|
||||
create_business_quality_node,
|
||||
create_institutional_flow_node,
|
||||
create_valuation_node,
|
||||
create_entry_timing_node,
|
||||
create_earnings_revisions_node,
|
||||
create_sector_rotation_node,
|
||||
create_backlog_node,
|
||||
create_crowding_node,
|
||||
create_archetype_node,
|
||||
)
|
||||
from .tier3 import (
|
||||
create_bull_case_node,
|
||||
create_bear_case_node,
|
||||
create_debate_node,
|
||||
create_risk_node,
|
||||
create_final_decision_node,
|
||||
)
|
||||
from .scoring import create_scoring_node
|
||||
from .portfolio import (
|
||||
create_theme_substitution_node,
|
||||
create_position_replacement_node,
|
||||
)
|
||||
|
||||
__all__ = [
|
||||
"create_validation_node",
|
||||
"create_macro_node",
|
||||
"create_liquidity_node",
|
||||
"create_business_quality_node",
|
||||
"create_institutional_flow_node",
|
||||
"create_valuation_node",
|
||||
"create_entry_timing_node",
|
||||
"create_earnings_revisions_node",
|
||||
"create_sector_rotation_node",
|
||||
"create_backlog_node",
|
||||
"create_crowding_node",
|
||||
"create_archetype_node",
|
||||
"create_bull_case_node",
|
||||
"create_bear_case_node",
|
||||
"create_debate_node",
|
||||
"create_risk_node",
|
||||
"create_final_decision_node",
|
||||
"create_scoring_node",
|
||||
"create_theme_substitution_node",
|
||||
"create_position_replacement_node",
|
||||
]
|
||||
"""Structured output agents for the equity ranking engine."""
|
||||
|
||||
from .tier1 import (
|
||||
create_validation_node,
|
||||
create_macro_node,
|
||||
create_liquidity_node,
|
||||
)
|
||||
from .tier2 import (
|
||||
create_business_quality_node,
|
||||
create_institutional_flow_node,
|
||||
create_valuation_node,
|
||||
create_entry_timing_node,
|
||||
create_earnings_revisions_node,
|
||||
create_sector_rotation_node,
|
||||
create_backlog_node,
|
||||
create_crowding_node,
|
||||
create_archetype_node,
|
||||
)
|
||||
from .tier3 import (
|
||||
create_bull_case_node,
|
||||
create_bear_case_node,
|
||||
create_debate_node,
|
||||
create_risk_node,
|
||||
create_final_decision_node,
|
||||
)
|
||||
from .scoring import create_scoring_node
|
||||
from .portfolio import (
|
||||
create_theme_substitution_node,
|
||||
create_position_replacement_node,
|
||||
)
|
||||
|
||||
__all__ = [
|
||||
"create_validation_node",
|
||||
"create_macro_node",
|
||||
"create_liquidity_node",
|
||||
"create_business_quality_node",
|
||||
"create_institutional_flow_node",
|
||||
"create_valuation_node",
|
||||
"create_entry_timing_node",
|
||||
"create_earnings_revisions_node",
|
||||
"create_sector_rotation_node",
|
||||
"create_backlog_node",
|
||||
"create_crowding_node",
|
||||
"create_archetype_node",
|
||||
"create_bull_case_node",
|
||||
"create_bear_case_node",
|
||||
"create_debate_node",
|
||||
"create_risk_node",
|
||||
"create_final_decision_node",
|
||||
"create_scoring_node",
|
||||
"create_theme_substitution_node",
|
||||
"create_position_replacement_node",
|
||||
]
|
||||
|
|
|
|||
|
|
@ -1,267 +1,267 @@
|
|||
"""Portfolio-level agents: Theme Substitution Engine, Position Replacement Agent.
|
||||
|
||||
These run after scoring, before the debate phase. They use the deep-thinking LLM
|
||||
to evaluate the stock in context — is it the best expression of its theme? Should
|
||||
it replace an existing holding?
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import logging
|
||||
from typing import Any, Dict, List
|
||||
|
||||
import yfinance as yf
|
||||
|
||||
from tradingagents.models import (
|
||||
PositionReplacementOutput,
|
||||
ThemeStock,
|
||||
ThemeSubstitutionOutput,
|
||||
invoke_structured,
|
||||
)
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def _fetch_peer_basics(tickers: List[str]) -> List[dict]:
    """Fetch basic yfinance data for a list of peer tickers.

    Failed lookups yield ``{"ticker": ..., "error": "fetch failed"}`` records
    instead of aborting, so one bad symbol never loses the rest.
    """
    results: List[dict] = []
    # Cap at 8 peers to keep the downstream prompt a manageable size.
    for raw in tickers[:8]:
        symbol = raw.upper()
        try:
            info = yf.Ticker(symbol).info or {}
            record = {
                "ticker": symbol,
                "company_name": info.get("longName") or info.get("shortName") or raw,
                "market_cap": info.get("marketCap"),
                "current_price": info.get("currentPrice") or info.get("regularMarketPrice"),
                "trailing_pe": info.get("trailingPE"),
                "forward_pe": info.get("forwardPE"),
                "revenue_growth": info.get("revenueGrowth"),
                "profit_margins": info.get("profitMargins"),
                "return_on_equity": info.get("returnOnEquity"),
                "52w_range_pct": _range_pct(info),
            }
        except Exception:
            # Best-effort: record the failure and continue with other peers.
            record = {"ticker": symbol, "error": "fetch failed"}
        results.append(record)
    return results
|
||||
|
||||
|
||||
def _range_pct(info: dict) -> float | None:
|
||||
hi = info.get("fiftyTwoWeekHigh")
|
||||
lo = info.get("fiftyTwoWeekLow")
|
||||
price = info.get("currentPrice") or info.get("regularMarketPrice")
|
||||
if hi and lo and price and (hi - lo) > 0:
|
||||
return round((price - lo) / (hi - lo) * 100, 1)
|
||||
return None
|
||||
|
||||
|
||||
def _summarize_for_theme(state: Dict[str, Any]) -> str:
|
||||
"""Compact summary of the candidate stock for theme comparison."""
|
||||
card = state.get("company_card") or {}
|
||||
macro = state.get("macro") or {}
|
||||
bq = state.get("business_quality") or {}
|
||||
inst = state.get("institutional_flow") or {}
|
||||
val = state.get("valuation") or {}
|
||||
er = state.get("earnings_revisions") or {}
|
||||
arch = state.get("archetype") or {}
|
||||
|
||||
return "\n".join([
|
||||
f"Ticker: {card.get('ticker', '?')} | {card.get('company_name', '?')}",
|
||||
f"Sector: {card.get('sector', '?')} | Industry: {card.get('industry', '?')}",
|
||||
f"Market Cap: {card.get('market_cap_formatted', 'N/A')}",
|
||||
f"Archetype: {arch.get('archetype', 'N/A')}",
|
||||
f"Master Score: {state.get('master_score', 'N/A')}",
|
||||
f"Adjusted Score: {state.get('adjusted_score', 'N/A')}",
|
||||
f"Position Role: {state.get('position_role', 'N/A')}",
|
||||
f"Macro Regime: {macro.get('regime_label', '?')} | Risk: {macro.get('risk_appetite', '?')} | Liq: {macro.get('liquidity_regime', '?')}",
|
||||
f"Business Quality: {bq.get('score_0_to_10', 'N/A')} | Moat: {bq.get('competitive_moat', '?')}",
|
||||
f"Inst Flow: {inst.get('score_0_to_10', 'N/A')} | Smart Money: {inst.get('smart_money_signal', '?')}",
|
||||
f"Valuation: {val.get('score_0_to_10', 'N/A')} | Verdict: {val.get('valuation_verdict', '?')}",
|
||||
f"Earnings Rev: {er.get('score_0_to_10', 'N/A')} | Direction: {er.get('eps_revision_direction', '?')}",
|
||||
])
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Theme Substitution Engine
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
def create_theme_substitution_node(llm):
    """Identifies whether the stock is the best expression of its theme.

    The returned node grounds the LLM with live peer fundamentals (when the
    company card lists competitors), asks for a ThemeSubstitutionOutput, and
    stores it as a plain dict under ``state["theme_substitution"]``. On any
    LLM failure it falls back to a neutral "best expression" result so the
    pipeline can continue.
    """

    def node(state: Dict[str, Any]) -> Dict[str, Any]:
        ticker = state["ticker"]
        card = state.get("company_card") or {}
        summary = _summarize_for_theme(state)
        # NOTE: the previous version also fetched yf.Ticker(ticker).info just
        # to read industry/sector, but never used them — that dead network
        # call (and the unused master_score local) has been removed.

        # Fetch competitor/peer data to ground the LLM's comparison
        competitors = card.get("competitors") or []
        peer_data = _fetch_peer_basics(competitors) if competitors else []
        peer_summary = ""
        if peer_data:
            lines = []
            for p in peer_data:
                if p.get("error"):
                    continue
                rg = p.get("revenue_growth")
                rg_str = f"{rg*100:.1f}%" if rg else "N/A"
                pm = p.get("profit_margins")
                pm_str = f"{pm*100:.1f}%" if pm else "N/A"
                lines.append(
                    f"  {p['ticker']}: P/E={p.get('trailing_pe', 'N/A')}, "
                    f"Fwd P/E={p.get('forward_pe', 'N/A')}, "
                    f"RevGrowth={rg_str}, "
                    f"Margins={pm_str}, "
                    f"52W={p.get('52w_range_pct', 'N/A')}%"
                )
            peer_summary = "\n".join(lines)

        theme_prompt = f"""You are a Theme Substitution Analyst. Your job: determine if {ticker} is the BEST
expression of its investment theme, or if better alternatives exist.

CANDIDATE STOCK:
{summary}

{f'PEER FUNDAMENTALS (live data):{chr(10)}{peer_summary}' if peer_summary else 'No live peer data available — use your knowledge of these companies.'}

INSTRUCTIONS — do this in order:

1. IDENTIFY THE THEME: What macro/sector theme does {ticker} express?
   Examples: "AI infrastructure buildout", "GLP-1 obesity drugs", "defense spending ramp",
   "EV supply chain", "cloud migration", "reshoring/nearshoring".
   Name it clearly in theme_name.

2. LIST THEME PEERS: Name 3-6 other publicly traded stocks that express the SAME theme.
   Use the peer data above if available. These should be the strongest competitors
   for capital allocation in this theme.
   For each peer, score master_score_estimate (0-10) based on fundamentals, momentum,
   and positioning vs {ticker}.

3. RANK WITHIN THEME: Rank all stocks (including {ticker}) by investment quality.
   The stock with the best combination of: business quality, valuation, momentum,
   and institutional positioning should rank #1.

4. DETERMINE BEST EXPRESSION:
   - Set best_expression_of_theme=true if {ticker} is rank #1 or close (#1-2).
   - Set best_expression_of_theme=false if clearly better alternatives exist.
   - List stronger_alternatives (tickers that rank above {ticker}).
   - Set relative_score_gap: how many score points {ticker} trails the best alternative
     (0 if {ticker} is best, positive number if it trails).

5. PORTFOLIO OVERLAP: Flag if {ticker} has high correlation with common holdings.
   Set portfolio_overlap_warning if this stock would add redundant exposure.

Be honest and rigorous. A stock can score well absolutely but still not be the best
way to express its theme."""

        try:
            result = invoke_structured(llm, ThemeSubstitutionOutput, theme_prompt)
        except Exception as e:
            # Degrade gracefully: a missing theme analysis should not halt the run.
            logger.warning("ThemeSubstitution LLM failed: %s", e)
            result = ThemeSubstitutionOutput(
                theme_name="Unknown",
                best_expression_of_theme=True,
                reasoning="Theme analysis unavailable",
            )

        return {"theme_substitution": result.model_dump()}

    return node
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Position Replacement Agent
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
def create_position_replacement_node(llm):
    """Identifies when a new stock is a better use of capital than alternatives.

    The returned node builds on the theme-substitution analysis: if the theme
    analysis already crowned this ticker the best expression, it short-circuits
    with a high-conviction "keep" result; otherwise it asks the LLM for a
    PositionReplacementOutput. On LLM failure it falls back to a conservative
    "do not replace" result. Output is stored under
    ``state["position_replacement"]``.
    """

    def node(state: Dict[str, Any]) -> Dict[str, Any]:
        ticker = state["ticker"]
        summary = _summarize_for_theme(state)
        theme = state.get("theme_substitution") or {}

        # Get the strongest alternative from theme analysis
        stronger = theme.get("stronger_alternatives", [])
        theme_stocks = theme.get("theme_stocks_ranked", [])
        theme_name = theme.get("theme_name", "Unknown")

        # If no stronger alternatives, this IS the best — skip deep comparison
        if not stronger and theme.get("best_expression_of_theme", True):
            result = PositionReplacementOutput(
                replace_candidate=ticker,
                replace_with="",
                score_difference=0.0,
                theme_overlap=theme_name,
                replacement_reason=f"{ticker} is the best expression of the '{theme_name}' theme.",
                conviction_level="high",
                should_replace=False,
            )
            return {"position_replacement": result.model_dump()}

        # Format theme peers for comparison
        peer_lines = []
        for ts in theme_stocks[:6]:
            if isinstance(ts, dict):
                peer_lines.append(
                    f"  {ts.get('ticker', '?')}: est. score {ts.get('master_score_estimate', '?')}/10 "
                    f"— advantage: {ts.get('key_advantage', 'N/A')}, weakness: {ts.get('key_weakness', 'N/A')}"
                )

        # "or 0" guards the :.1f format against a present-but-None gap value
        # (possible when the optional field round-trips through model_dump()).
        prompt = f"""You are a Position Replacement Analyst. Determine if {ticker} should be replaced
by a stronger alternative in the same theme.

CANDIDATE STOCK:
{summary}

THEME: {theme_name}
Best expression: {'Yes' if theme.get('best_expression_of_theme') else 'No'}
Score gap vs best: {(theme.get('relative_score_gap') or 0):.1f}

THEME PEERS:
{chr(10).join(peer_lines) or 'No peers available'}

STRONGER ALTERNATIVES: {', '.join(stronger) if stronger else 'None'}

INSTRUCTIONS:
1. Compare {ticker} to the strongest alternative in the theme.
2. Assess on these dimensions: master score, earnings revisions, institutional flow,
   risk profile, valuation, entry timing.
3. Set replace_with to the best alternative ticker (empty if none).
4. Set score_difference: how much better the replacement is (positive = replacement is stronger).
5. Set conviction_level: high / medium / low.
   - high: replacement is clearly better on 3+ dimensions.
   - medium: replacement is better on 1-2 dimensions, mixed on others.
   - low: marginal difference, keep current.
6. Set should_replace=true only if conviction_level is high.
7. List what the replacement is stronger_on and weaker_on vs {ticker}.

Be conservative. Don't recommend replacement for marginal differences."""

        try:
            result = invoke_structured(llm, PositionReplacementOutput, prompt)
        except Exception as e:
            # Conservative fallback: never recommend replacement on failure.
            logger.warning("PositionReplacement LLM failed: %s", e)
            result = PositionReplacementOutput(
                replace_candidate=ticker,
                should_replace=False,
                replacement_reason="Position replacement analysis unavailable",
            )

        # Normalise identity fields regardless of what the LLM produced.
        result.replace_candidate = ticker
        result.theme_overlap = theme_name

        return {"position_replacement": result.model_dump()}

    return node
|
||||
"""Portfolio-level agents: Theme Substitution Engine, Position Replacement Agent.
|
||||
|
||||
These run after scoring, before the debate phase. They use the deep-thinking LLM
|
||||
to evaluate the stock in context — is it the best expression of its theme? Should
|
||||
it replace an existing holding?
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import logging
|
||||
from typing import Any, Dict, List
|
||||
|
||||
import yfinance as yf
|
||||
|
||||
from tradingagents.models import (
|
||||
PositionReplacementOutput,
|
||||
ThemeStock,
|
||||
ThemeSubstitutionOutput,
|
||||
invoke_structured,
|
||||
)
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def _fetch_peer_basics(tickers: List[str]) -> List[dict]:
    """Fetch basic yfinance data for a list of peer tickers.

    Failed lookups yield ``{"ticker": ..., "error": "fetch failed"}`` records
    instead of aborting, so one bad symbol never loses the rest.
    """
    results: List[dict] = []
    # Cap at 8 peers to keep the downstream prompt a manageable size.
    for raw in tickers[:8]:
        symbol = raw.upper()
        try:
            info = yf.Ticker(symbol).info or {}
            record = {
                "ticker": symbol,
                "company_name": info.get("longName") or info.get("shortName") or raw,
                "market_cap": info.get("marketCap"),
                "current_price": info.get("currentPrice") or info.get("regularMarketPrice"),
                "trailing_pe": info.get("trailingPE"),
                "forward_pe": info.get("forwardPE"),
                "revenue_growth": info.get("revenueGrowth"),
                "profit_margins": info.get("profitMargins"),
                "return_on_equity": info.get("returnOnEquity"),
                "52w_range_pct": _range_pct(info),
            }
        except Exception:
            # Best-effort: record the failure and continue with other peers.
            record = {"ticker": symbol, "error": "fetch failed"}
        results.append(record)
    return results
|
||||
|
||||
|
||||
def _range_pct(info: dict) -> float | None:
|
||||
hi = info.get("fiftyTwoWeekHigh")
|
||||
lo = info.get("fiftyTwoWeekLow")
|
||||
price = info.get("currentPrice") or info.get("regularMarketPrice")
|
||||
if hi and lo and price and (hi - lo) > 0:
|
||||
return round((price - lo) / (hi - lo) * 100, 1)
|
||||
return None
|
||||
|
||||
|
||||
def _summarize_for_theme(state: Dict[str, Any]) -> str:
|
||||
"""Compact summary of the candidate stock for theme comparison."""
|
||||
card = state.get("company_card") or {}
|
||||
macro = state.get("macro") or {}
|
||||
bq = state.get("business_quality") or {}
|
||||
inst = state.get("institutional_flow") or {}
|
||||
val = state.get("valuation") or {}
|
||||
er = state.get("earnings_revisions") or {}
|
||||
arch = state.get("archetype") or {}
|
||||
|
||||
return "\n".join([
|
||||
f"Ticker: {card.get('ticker', '?')} | {card.get('company_name', '?')}",
|
||||
f"Sector: {card.get('sector', '?')} | Industry: {card.get('industry', '?')}",
|
||||
f"Market Cap: {card.get('market_cap_formatted', 'N/A')}",
|
||||
f"Archetype: {arch.get('archetype', 'N/A')}",
|
||||
f"Master Score: {state.get('master_score', 'N/A')}",
|
||||
f"Adjusted Score: {state.get('adjusted_score', 'N/A')}",
|
||||
f"Position Role: {state.get('position_role', 'N/A')}",
|
||||
f"Macro Regime: {macro.get('regime_label', '?')} | Risk: {macro.get('risk_appetite', '?')} | Liq: {macro.get('liquidity_regime', '?')}",
|
||||
f"Business Quality: {bq.get('score_0_to_10', 'N/A')} | Moat: {bq.get('competitive_moat', '?')}",
|
||||
f"Inst Flow: {inst.get('score_0_to_10', 'N/A')} | Smart Money: {inst.get('smart_money_signal', '?')}",
|
||||
f"Valuation: {val.get('score_0_to_10', 'N/A')} | Verdict: {val.get('valuation_verdict', '?')}",
|
||||
f"Earnings Rev: {er.get('score_0_to_10', 'N/A')} | Direction: {er.get('eps_revision_direction', '?')}",
|
||||
])
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Theme Substitution Engine
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
def create_theme_substitution_node(llm):
    """Identifies whether the stock is the best expression of its theme.

    The returned node grounds the LLM with live peer fundamentals (when the
    company card lists competitors), asks for a ThemeSubstitutionOutput, and
    stores it as a plain dict under ``state["theme_substitution"]``. On any
    LLM failure it falls back to a neutral "best expression" result so the
    pipeline can continue.
    """

    def node(state: Dict[str, Any]) -> Dict[str, Any]:
        ticker = state["ticker"]
        card = state.get("company_card") or {}
        summary = _summarize_for_theme(state)
        # NOTE: the previous version also fetched yf.Ticker(ticker).info just
        # to read industry/sector, but never used them — that dead network
        # call (and the unused master_score local) has been removed.

        # Fetch competitor/peer data to ground the LLM's comparison
        competitors = card.get("competitors") or []
        peer_data = _fetch_peer_basics(competitors) if competitors else []
        peer_summary = ""
        if peer_data:
            lines = []
            for p in peer_data:
                if p.get("error"):
                    continue
                rg = p.get("revenue_growth")
                rg_str = f"{rg*100:.1f}%" if rg else "N/A"
                pm = p.get("profit_margins")
                pm_str = f"{pm*100:.1f}%" if pm else "N/A"
                lines.append(
                    f"  {p['ticker']}: P/E={p.get('trailing_pe', 'N/A')}, "
                    f"Fwd P/E={p.get('forward_pe', 'N/A')}, "
                    f"RevGrowth={rg_str}, "
                    f"Margins={pm_str}, "
                    f"52W={p.get('52w_range_pct', 'N/A')}%"
                )
            peer_summary = "\n".join(lines)

        theme_prompt = f"""You are a Theme Substitution Analyst. Your job: determine if {ticker} is the BEST
expression of its investment theme, or if better alternatives exist.

CANDIDATE STOCK:
{summary}

{f'PEER FUNDAMENTALS (live data):{chr(10)}{peer_summary}' if peer_summary else 'No live peer data available — use your knowledge of these companies.'}

INSTRUCTIONS — do this in order:

1. IDENTIFY THE THEME: What macro/sector theme does {ticker} express?
   Examples: "AI infrastructure buildout", "GLP-1 obesity drugs", "defense spending ramp",
   "EV supply chain", "cloud migration", "reshoring/nearshoring".
   Name it clearly in theme_name.

2. LIST THEME PEERS: Name 3-6 other publicly traded stocks that express the SAME theme.
   Use the peer data above if available. These should be the strongest competitors
   for capital allocation in this theme.
   For each peer, score master_score_estimate (0-10) based on fundamentals, momentum,
   and positioning vs {ticker}.

3. RANK WITHIN THEME: Rank all stocks (including {ticker}) by investment quality.
   The stock with the best combination of: business quality, valuation, momentum,
   and institutional positioning should rank #1.

4. DETERMINE BEST EXPRESSION:
   - Set best_expression_of_theme=true if {ticker} is rank #1 or close (#1-2).
   - Set best_expression_of_theme=false if clearly better alternatives exist.
   - List stronger_alternatives (tickers that rank above {ticker}).
   - Set relative_score_gap: how many score points {ticker} trails the best alternative
     (0 if {ticker} is best, positive number if it trails).

5. PORTFOLIO OVERLAP: Flag if {ticker} has high correlation with common holdings.
   Set portfolio_overlap_warning if this stock would add redundant exposure.

Be honest and rigorous. A stock can score well absolutely but still not be the best
way to express its theme."""

        try:
            result = invoke_structured(llm, ThemeSubstitutionOutput, theme_prompt)
        except Exception as e:
            # Degrade gracefully: a missing theme analysis should not halt the run.
            logger.warning("ThemeSubstitution LLM failed: %s", e)
            result = ThemeSubstitutionOutput(
                theme_name="Unknown",
                best_expression_of_theme=True,
                reasoning="Theme analysis unavailable",
            )

        return {"theme_substitution": result.model_dump()}

    return node
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Position Replacement Agent
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
def create_position_replacement_node(llm):
    """Identifies when a new stock is a better use of capital than alternatives.

    The returned node builds on the theme-substitution analysis: if the theme
    analysis already crowned this ticker the best expression, it short-circuits
    with a high-conviction "keep" result; otherwise it asks the LLM for a
    PositionReplacementOutput. On LLM failure it falls back to a conservative
    "do not replace" result. Output is stored under
    ``state["position_replacement"]``.
    """

    def node(state: Dict[str, Any]) -> Dict[str, Any]:
        ticker = state["ticker"]
        summary = _summarize_for_theme(state)
        theme = state.get("theme_substitution") or {}

        # Get the strongest alternative from theme analysis
        stronger = theme.get("stronger_alternatives", [])
        theme_stocks = theme.get("theme_stocks_ranked", [])
        theme_name = theme.get("theme_name", "Unknown")

        # If no stronger alternatives, this IS the best — skip deep comparison
        if not stronger and theme.get("best_expression_of_theme", True):
            result = PositionReplacementOutput(
                replace_candidate=ticker,
                replace_with="",
                score_difference=0.0,
                theme_overlap=theme_name,
                replacement_reason=f"{ticker} is the best expression of the '{theme_name}' theme.",
                conviction_level="high",
                should_replace=False,
            )
            return {"position_replacement": result.model_dump()}

        # Format theme peers for comparison
        peer_lines = []
        for ts in theme_stocks[:6]:
            if isinstance(ts, dict):
                peer_lines.append(
                    f"  {ts.get('ticker', '?')}: est. score {ts.get('master_score_estimate', '?')}/10 "
                    f"— advantage: {ts.get('key_advantage', 'N/A')}, weakness: {ts.get('key_weakness', 'N/A')}"
                )

        # "or 0" guards the :.1f format against a present-but-None gap value
        # (possible when the optional field round-trips through model_dump()).
        prompt = f"""You are a Position Replacement Analyst. Determine if {ticker} should be replaced
by a stronger alternative in the same theme.

CANDIDATE STOCK:
{summary}

THEME: {theme_name}
Best expression: {'Yes' if theme.get('best_expression_of_theme') else 'No'}
Score gap vs best: {(theme.get('relative_score_gap') or 0):.1f}

THEME PEERS:
{chr(10).join(peer_lines) or 'No peers available'}

STRONGER ALTERNATIVES: {', '.join(stronger) if stronger else 'None'}

INSTRUCTIONS:
1. Compare {ticker} to the strongest alternative in the theme.
2. Assess on these dimensions: master score, earnings revisions, institutional flow,
   risk profile, valuation, entry timing.
3. Set replace_with to the best alternative ticker (empty if none).
4. Set score_difference: how much better the replacement is (positive = replacement is stronger).
5. Set conviction_level: high / medium / low.
   - high: replacement is clearly better on 3+ dimensions.
   - medium: replacement is better on 1-2 dimensions, mixed on others.
   - low: marginal difference, keep current.
6. Set should_replace=true only if conviction_level is high.
7. List what the replacement is stronger_on and weaker_on vs {ticker}.

Be conservative. Don't recommend replacement for marginal differences."""

        try:
            result = invoke_structured(llm, PositionReplacementOutput, prompt)
        except Exception as e:
            # Conservative fallback: never recommend replacement on failure.
            logger.warning("PositionReplacement LLM failed: %s", e)
            result = PositionReplacementOutput(
                replace_candidate=ticker,
                should_replace=False,
                replacement_reason="Position replacement analysis unavailable",
            )

        # Normalise identity fields regardless of what the LLM produced.
        result.replace_candidate = ticker
        result.theme_overlap = theme_name

        return {"position_replacement": result.model_dump()}

    return node
|
||||
|
|
|
|||
|
|
@ -1,59 +1,59 @@
|
|||
"""Deterministic scoring node — no LLM, pure Python.
|
||||
|
||||
Computes master_score, applies confidence penalties, checks hard vetoes,
|
||||
and assigns position roles. This is the heart of the deterministic pipeline.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import Any, Dict
|
||||
|
||||
from tradingagents.models import (
|
||||
DataFlag,
|
||||
apply_confidence_penalty,
|
||||
assign_position_role,
|
||||
compute_master_score,
|
||||
)
|
||||
|
||||
|
||||
def create_scoring_node():
    """Create the deterministic scoring node (no LLM needed).

    The node aggregates the eight agent component scores into ``master_score``,
    applies confidence penalties from data-quality flags (and the hard veto)
    to produce ``adjusted_score``, and maps that to a ``position_role``.
    """

    def _component(state: Dict[str, Any], key: str, field: str = "score_0_to_10") -> float:
        """Read one agent's score from state, defaulting to a neutral 5.0."""
        return (state.get(key) or {}).get(field, 5.0)

    def node(state: Dict[str, Any]) -> Dict[str, Any]:
        # Extract scores from each agent output (neutral 5.0 when absent).
        bq = _component(state, "business_quality")
        macro = _component(state, "macro", "macro_alignment_0_to_10")
        inst = _component(state, "institutional_flow")
        val = _component(state, "valuation")
        et = _component(state, "entry_timing")
        er = _component(state, "earnings_revisions")
        bl = _component(state, "backlog")
        cr = _component(state, "crowding")

        # Regime adjustment from macro agent
        regime_adj = (state.get("macro") or {}).get("regime_score_adjustment", 0.0)

        master = compute_master_score(
            bq, macro, inst, val, et, er, bl, cr,
            regime_adjustment=regime_adj,
        )

        # Collect all data quality flags, normalising raw dicts into DataFlag.
        all_flags = []
        for f in (state.get("global_flags") or []):
            if isinstance(f, dict):
                all_flags.append(DataFlag(**f))
            elif isinstance(f, DataFlag):
                all_flags.append(f)

        hard_veto = state.get("hard_veto", False)
        adjusted = apply_confidence_penalty(master, all_flags, hard_veto)
        role = assign_position_role(adjusted)

        return {
            "master_score": master,
            "adjusted_score": adjusted,
            "position_role": role,
        }

    return node
|
||||
"""Deterministic scoring node — no LLM, pure Python.
|
||||
|
||||
Computes master_score, applies confidence penalties, checks hard vetoes,
|
||||
and assigns position roles. This is the heart of the deterministic pipeline.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import Any, Dict
|
||||
|
||||
from tradingagents.models import (
|
||||
DataFlag,
|
||||
apply_confidence_penalty,
|
||||
assign_position_role,
|
||||
compute_master_score,
|
||||
)
|
||||
|
||||
|
||||
def create_scoring_node():
    """Create the deterministic scoring node (no LLM needed).

    Returns a node callable that reads the per-agent Tier 1/2 outputs from
    the state dict, computes the master score, applies data-quality and
    hard-veto penalties, and assigns a position role.
    """

    def node(state: Dict[str, Any]) -> Dict[str, Any]:
        def section_value(section: str, key: str) -> float:
            # Absent sections and absent keys both fall back to a neutral 5.0.
            return (state.get(section) or {}).get(key, 5.0)

        # Per-agent 0-10 scores, in the positional order that
        # compute_master_score expects.
        component_scores = [
            section_value("business_quality", "score_0_to_10"),
            section_value("macro", "macro_alignment_0_to_10"),
            section_value("institutional_flow", "score_0_to_10"),
            section_value("valuation", "score_0_to_10"),
            section_value("entry_timing", "score_0_to_10"),
            section_value("earnings_revisions", "score_0_to_10"),
            section_value("backlog", "score_0_to_10"),
            section_value("crowding", "score_0_to_10"),
        ]

        # Regime adjustment supplied by the macro agent (0.0 when absent).
        regime_adj = (state.get("macro") or {}).get("regime_score_adjustment", 0.0)
        master = compute_master_score(*component_scores, regime_adjustment=regime_adj)

        def coerce_flag(raw):
            # Flags may arrive serialized (dict) or as live DataFlag models;
            # anything else is silently dropped, matching prior behavior.
            if isinstance(raw, dict):
                return DataFlag(**raw)
            return raw if isinstance(raw, DataFlag) else None

        quality_flags = [
            flag
            for flag in (coerce_flag(f) for f in (state.get("global_flags") or []))
            if flag is not None
        ]

        adjusted = apply_confidence_penalty(
            master, quality_flags, state.get("hard_veto", False)
        )
        return {
            "master_score": master,
            "adjusted_score": adjusted,
            "position_role": assign_position_role(adjusted),
        }

    return node
|
||||
|
|
|
|||
|
|
@ -1,277 +1,277 @@
|
|||
"""Tier 1 agents: Validation, Macro Regime, Liquidity.
|
||||
|
||||
Tier 1 is cheap and fast — runs on every stock. Validation is deterministic
|
||||
(no LLM). Macro and Liquidity use the quick-thinking LLM.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import logging
|
||||
from typing import Any, Dict
|
||||
|
||||
import yfinance as yf
|
||||
|
||||
from tradingagents.models import (
|
||||
CompanyCard,
|
||||
DataFlag,
|
||||
LiquidityOutput,
|
||||
MacroRegimeOutput,
|
||||
ValidationOutput,
|
||||
invoke_structured,
|
||||
)
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Helpers
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
def _fmt_num(val):
|
||||
if val is None:
|
||||
return None
|
||||
if abs(val) >= 1e12:
|
||||
return f"${val / 1e12:.2f}T"
|
||||
if abs(val) >= 1e9:
|
||||
return f"${val / 1e9:.2f}B"
|
||||
if abs(val) >= 1e6:
|
||||
return f"${val / 1e6:.2f}M"
|
||||
return f"${val:,.0f}"
|
||||
|
||||
|
||||
def _fetch_yf_info(ticker: str) -> dict:
    """Fetch the yfinance ``info`` mapping for *ticker* (empty dict on failure)."""
    symbol = ticker.upper()
    try:
        info = yf.Ticker(symbol).info
    except Exception as e:
        # Network or parsing failures are non-fatal; validation treats an
        # empty dict as "no data".
        logger.warning("yfinance fetch failed for %s: %s", ticker, e)
        return {}
    return info or {}
|
||||
|
||||
|
||||
def _fetch_macro_data() -> dict:
    """Fetch macro indicators via yfinance; return ``{}`` if anything fails."""
    # Imported lazily so module import does not require the dataflow layer.
    from tradingagents.dataflows.y_finance import get_macro_indicators

    try:
        payload = get_macro_indicators()
        # The helper may return either a JSON string or an already-parsed dict.
        if isinstance(payload, str):
            payload = json.loads(payload)
    except Exception as e:
        logger.warning("Macro data fetch failed: %s", e)
        return {}
    return payload
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Validation (deterministic — no LLM)
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
def create_validation_node(llm=None):
    """Validation + CompanyCard node. Does NOT use LLM — purely data-driven.

    Args:
        llm: Ignored. Accepted only so this factory has the same call shape
            as the LLM-backed node factories.

    Returns:
        A node callable that reads ``state["ticker"]``, fetches yfinance
        info, and returns ``validation``/``company_card`` state updates —
        or a hard-veto update when no company data exists.
    """

    def node(state: Dict[str, Any]) -> Dict[str, Any]:
        ticker = state["ticker"]
        info = _fetch_yf_info(ticker)

        # No data at all → hard veto
        company_name = info.get("longName") or info.get("shortName") or ""
        if not company_name:
            # Build the severe flag once and reuse it for both the
            # ValidationOutput and the global flag list. (Previously the
            # identical DataFlag was constructed twice, inviting drift
            # between the two copies.)
            missing_flag = DataFlag(
                field="ticker", severity="severe",
                message=f"No data for {ticker}",
            )
            v = ValidationOutput(
                ticker_valid=False,
                ticker_resolved=ticker.upper(),
                company_name="",
                veto=True,
                veto_reason=f"No company data found for {ticker}",
                data_quality_flags=[missing_flag],
            )
            return {
                "validation": v.model_dump(),
                "hard_veto": True,
                "hard_veto_reason": v.veto_reason,
                "global_flags": [missing_flag.model_dump()],
            }

        validation = ValidationOutput(
            ticker_valid=True,
            ticker_resolved=ticker.upper(),
            company_name=company_name,
            company_name_match=True,
            exchange=info.get("exchange"),
            sector=info.get("sector"),
            industry=info.get("industry"),
            is_active=True,
        )

        # Build company card — bucket market cap into standard categories.
        mc = info.get("marketCap")
        if mc and mc >= 10e9:
            mc_cat = "large_cap"
        elif mc and mc >= 2e9:
            mc_cat = "mid_cap"
        elif mc and mc >= 300e6:
            mc_cat = "small_cap"
        else:
            # Zero/None market cap means the category is simply unknown.
            mc_cat = "micro_cap" if mc else "unknown"

        card = CompanyCard(
            company_name=company_name,
            ticker=ticker.upper(),
            sector=info.get("sector", "Unknown"),
            industry=info.get("industry", "Unknown"),
            # Truncated to keep downstream prompts compact.
            description=(info.get("longBusinessSummary") or "")[:500],
            market_cap=mc,
            market_cap_formatted=_fmt_num(mc),
            market_cap_category=mc_cat,
            current_price=info.get("currentPrice") or info.get("regularMarketPrice"),
            revenue=info.get("totalRevenue"),
            profit_margins=info.get("profitMargins"),
            employees=info.get("fullTimeEmployees"),
        )

        return {
            "validation": validation.model_dump(),
            "company_card": card.model_dump(),
        }

    return node
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Macro Regime
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
def create_macro_node(llm):
    """Macro regime analysis node — uses quick LLM."""

    def node(state: Dict[str, Any]) -> Dict[str, Any]:
        """Classify the macro regime and score macro alignment for the ticker."""
        ticker = state["ticker"]
        macro_data = _fetch_macro_data()
        card = state.get("company_card") or {}
        sector = card.get("sector", "Unknown")

        # SPY lives inside sector_performance and doubles as the broad-market benchmark.
        spy_perf = (macro_data.get("sector_performance") or {}).get("SPY", {})
        sector_perfs = macro_data.get("sector_performance") or {}

        # Build compact sector table (one line per ETF with a 1-month return).
        sector_lines = []
        for etf, data in sorted(sector_perfs.items()):
            r1 = data.get("return_1m")
            name = data.get("name", etf)
            if r1 is not None:
                sector_lines.append(f"  {etf} ({name}): {r1:+.1f}% 1M")

        prompt = f"""You are a Macro Regime Analyst in a structured equity ranking pipeline.

Ticker: {ticker} | Sector: {sector}

MACRO DATA (source: yfinance):
- VIX: {macro_data.get('vix_level', 'N/A')} (source: yfinance)
- 10Y Yield: {macro_data.get('ten_year_yield', 'N/A')}% (source: yfinance)
- Dollar 1M: {macro_data.get('dollar_1m_return', 'N/A')}% (source: yfinance)
- Credit Spreads: {macro_data.get('credit_spread_direction', 'N/A')} (source: yfinance)
- SPY 1M: {spy_perf.get('return_1m', 'N/A')}% (source: yfinance)

SECTOR PERFORMANCE (1M, source: yfinance):
{chr(10).join(sector_lines[:12]) or 'N/A'}

NOTE: If a metric shows 'N/A' or 'unknown', say 'data unavailable' rather than guessing.

INSTRUCTIONS:
1. Classify risk_appetite: "risk-on" / "risk-off" / "transitional".
   - risk-on: VIX low, spreads tight, SPY up, breadth strong.
   - risk-off: VIX elevated, spreads widening, SPY down, flight to safety.
   - transitional: mixed signals.
2. Classify liquidity_regime: "expansion" / "contraction" / "neutral".
   - expansion: falling yields, dovish Fed, credit flowing, dollar weakening.
   - contraction: rising yields, hawkish Fed, tight credit, dollar strengthening.
3. Set regime_score_adjustment (-10 to +10):
   - +5 to +10 = strong macro tailwind for this specific stock/sector.
   - +1 to +4 = mild tailwind.
   - 0 = neutral.
   - -1 to -4 = mild headwind.
   - -5 to -10 = severe macro headwind (risk-off + contraction + hostile sector).
   This adjustment directly modifies the 0-100 master score for ALL stocks.
4. Score macro_alignment_0_to_10: how well macro supports {ticker} specifically.
5. Also provide score_0_to_10 (overall macro health).
6. Set regime_label: descriptive label (e.g., "Late Cycle Risk-Off").
7. List key positives, negatives, risks. Be concise."""

        # Fall back to a neutral, low-confidence result (with a moderate
        # data-quality flag) when the LLM call fails.
        try:
            result = invoke_structured(llm, MacroRegimeOutput, prompt)
        except Exception as e:
            logger.warning("Macro LLM call failed: %s", e)
            result = MacroRegimeOutput(
                score_0_to_10=5.0, confidence_0_to_1=0.1,
                summary_1_sentence="Macro analysis unavailable",
                data_quality_flags=[
                    DataFlag(field="macro", severity="moderate", message=str(e))
                ],
            )

        # Override with actual fetched data — the numeric fields reflect what
        # was fetched, not whatever values the LLM echoed back.
        result.vix_level = macro_data.get("vix_level")
        result.vix_regime = macro_data.get("vix_regime", "unknown")
        result.ten_year_yield = macro_data.get("ten_year_yield")
        result.dollar_strength = macro_data.get("dollar_strength", "unknown")
        result.credit_spread_direction = macro_data.get(
            "credit_spread_direction", "unknown"
        )
        result.spy_1m_return = spy_perf.get("return_1m")

        # Propagate this agent's data-quality flags into the shared global list.
        flags = [f.model_dump() for f in result.data_quality_flags]
        return {"macro": result.model_dump(), "global_flags": flags}

    return node
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Liquidity
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
def create_liquidity_node(llm):
    """Liquidity analysis node — uses quick LLM."""

    def node(state: Dict[str, Any]) -> Dict[str, Any]:
        """Score overall liquidity favorability for the ticker via the quick LLM."""
        ticker = state["ticker"]
        macro_data = _fetch_macro_data()
        card = state.get("company_card") or {}

        prompt = f"""You are a Liquidity Analyst in a structured equity ranking pipeline.

Ticker: {ticker} | Sector: {card.get('sector', 'Unknown')}

AVAILABLE DATA (source: yfinance macro API):
- VIX: {macro_data.get('vix_level', 'N/A')} (source: yfinance)
- 10Y Yield: {macro_data.get('ten_year_yield', 'N/A')}% (source: yfinance)
- Credit Spreads: {macro_data.get('credit_spread_direction', 'N/A')} (source: yfinance)
- Dollar Strength: {macro_data.get('dollar_strength', 'N/A')} (source: yfinance)

NOTE: If a metric shows 'N/A' or 'unknown', say 'data unavailable' rather than guessing.

INSTRUCTIONS:
1. Assess Fed stance (dovish / neutral / hawkish) based on yield environment.
2. Assess market breadth (strong / moderate / weak).
3. Assess volume profile (above_average / average / below_average).
4. Assess SPY trend (uptrend / downtrend / sideways).
5. Score overall liquidity favorability 0-10 for this stock.
6. Be concise."""

        # Neutral, low-confidence fallback keeps the pipeline running when
        # the LLM call fails.
        try:
            result = invoke_structured(llm, LiquidityOutput, prompt)
        except Exception as e:
            logger.warning("Liquidity LLM call failed: %s", e)
            result = LiquidityOutput(
                score_0_to_10=5.0, confidence_0_to_1=0.1,
                summary_1_sentence="Liquidity analysis unavailable",
            )

        # Propagate this agent's data-quality flags into the shared global list.
        flags = [f.model_dump() for f in result.data_quality_flags]
        return {"liquidity": result.model_dump(), "global_flags": flags}

    return node
|
||||
"""Tier 1 agents: Validation, Macro Regime, Liquidity.
|
||||
|
||||
Tier 1 is cheap and fast — runs on every stock. Validation is deterministic
|
||||
(no LLM). Macro and Liquidity use the quick-thinking LLM.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import logging
|
||||
from typing import Any, Dict
|
||||
|
||||
import yfinance as yf
|
||||
|
||||
from tradingagents.models import (
|
||||
CompanyCard,
|
||||
DataFlag,
|
||||
LiquidityOutput,
|
||||
MacroRegimeOutput,
|
||||
ValidationOutput,
|
||||
invoke_structured,
|
||||
)
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Helpers
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
def _fmt_num(val):
|
||||
if val is None:
|
||||
return None
|
||||
if abs(val) >= 1e12:
|
||||
return f"${val / 1e12:.2f}T"
|
||||
if abs(val) >= 1e9:
|
||||
return f"${val / 1e9:.2f}B"
|
||||
if abs(val) >= 1e6:
|
||||
return f"${val / 1e6:.2f}M"
|
||||
return f"${val:,.0f}"
|
||||
|
||||
|
||||
def _fetch_yf_info(ticker: str) -> dict:
    """Fetch the yfinance ``info`` mapping for *ticker* (empty dict on failure)."""
    symbol = ticker.upper()
    try:
        info = yf.Ticker(symbol).info
    except Exception as e:
        # Network or parsing failures are non-fatal; validation treats an
        # empty dict as "no data".
        logger.warning("yfinance fetch failed for %s: %s", ticker, e)
        return {}
    return info or {}
|
||||
|
||||
|
||||
def _fetch_macro_data() -> dict:
    """Fetch macro indicators via yfinance; return ``{}`` if anything fails."""
    # Imported lazily so module import does not require the dataflow layer.
    from tradingagents.dataflows.y_finance import get_macro_indicators

    try:
        payload = get_macro_indicators()
        # The helper may return either a JSON string or an already-parsed dict.
        if isinstance(payload, str):
            payload = json.loads(payload)
    except Exception as e:
        logger.warning("Macro data fetch failed: %s", e)
        return {}
    return payload
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Validation (deterministic — no LLM)
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
def create_validation_node(llm=None):
    """Validation + CompanyCard node. Does NOT use LLM — purely data-driven.

    Args:
        llm: Ignored. Accepted only so this factory has the same call shape
            as the LLM-backed node factories.

    Returns:
        A node callable that reads ``state["ticker"]``, fetches yfinance
        info, and returns ``validation``/``company_card`` state updates —
        or a hard-veto update when no company data exists.
    """

    def node(state: Dict[str, Any]) -> Dict[str, Any]:
        ticker = state["ticker"]
        info = _fetch_yf_info(ticker)

        # No data at all → hard veto
        company_name = info.get("longName") or info.get("shortName") or ""
        if not company_name:
            # Build the severe flag once and reuse it for both the
            # ValidationOutput and the global flag list. (Previously the
            # identical DataFlag was constructed twice, inviting drift
            # between the two copies.)
            missing_flag = DataFlag(
                field="ticker", severity="severe",
                message=f"No data for {ticker}",
            )
            v = ValidationOutput(
                ticker_valid=False,
                ticker_resolved=ticker.upper(),
                company_name="",
                veto=True,
                veto_reason=f"No company data found for {ticker}",
                data_quality_flags=[missing_flag],
            )
            return {
                "validation": v.model_dump(),
                "hard_veto": True,
                "hard_veto_reason": v.veto_reason,
                "global_flags": [missing_flag.model_dump()],
            }

        validation = ValidationOutput(
            ticker_valid=True,
            ticker_resolved=ticker.upper(),
            company_name=company_name,
            company_name_match=True,
            exchange=info.get("exchange"),
            sector=info.get("sector"),
            industry=info.get("industry"),
            is_active=True,
        )

        # Build company card — bucket market cap into standard categories.
        mc = info.get("marketCap")
        if mc and mc >= 10e9:
            mc_cat = "large_cap"
        elif mc and mc >= 2e9:
            mc_cat = "mid_cap"
        elif mc and mc >= 300e6:
            mc_cat = "small_cap"
        else:
            # Zero/None market cap means the category is simply unknown.
            mc_cat = "micro_cap" if mc else "unknown"

        card = CompanyCard(
            company_name=company_name,
            ticker=ticker.upper(),
            sector=info.get("sector", "Unknown"),
            industry=info.get("industry", "Unknown"),
            # Truncated to keep downstream prompts compact.
            description=(info.get("longBusinessSummary") or "")[:500],
            market_cap=mc,
            market_cap_formatted=_fmt_num(mc),
            market_cap_category=mc_cat,
            current_price=info.get("currentPrice") or info.get("regularMarketPrice"),
            revenue=info.get("totalRevenue"),
            profit_margins=info.get("profitMargins"),
            employees=info.get("fullTimeEmployees"),
        )

        return {
            "validation": validation.model_dump(),
            "company_card": card.model_dump(),
        }

    return node
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Macro Regime
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
def create_macro_node(llm):
    """Macro regime analysis node — uses quick LLM."""

    def node(state: Dict[str, Any]) -> Dict[str, Any]:
        """Classify the macro regime and score macro alignment for the ticker."""
        ticker = state["ticker"]
        macro_data = _fetch_macro_data()
        card = state.get("company_card") or {}
        sector = card.get("sector", "Unknown")

        # SPY lives inside sector_performance and doubles as the broad-market benchmark.
        spy_perf = (macro_data.get("sector_performance") or {}).get("SPY", {})
        sector_perfs = macro_data.get("sector_performance") or {}

        # Build compact sector table (one line per ETF with a 1-month return).
        sector_lines = []
        for etf, data in sorted(sector_perfs.items()):
            r1 = data.get("return_1m")
            name = data.get("name", etf)
            if r1 is not None:
                sector_lines.append(f"  {etf} ({name}): {r1:+.1f}% 1M")

        prompt = f"""You are a Macro Regime Analyst in a structured equity ranking pipeline.

Ticker: {ticker} | Sector: {sector}

MACRO DATA (source: yfinance):
- VIX: {macro_data.get('vix_level', 'N/A')} (source: yfinance)
- 10Y Yield: {macro_data.get('ten_year_yield', 'N/A')}% (source: yfinance)
- Dollar 1M: {macro_data.get('dollar_1m_return', 'N/A')}% (source: yfinance)
- Credit Spreads: {macro_data.get('credit_spread_direction', 'N/A')} (source: yfinance)
- SPY 1M: {spy_perf.get('return_1m', 'N/A')}% (source: yfinance)

SECTOR PERFORMANCE (1M, source: yfinance):
{chr(10).join(sector_lines[:12]) or 'N/A'}

NOTE: If a metric shows 'N/A' or 'unknown', say 'data unavailable' rather than guessing.

INSTRUCTIONS:
1. Classify risk_appetite: "risk-on" / "risk-off" / "transitional".
   - risk-on: VIX low, spreads tight, SPY up, breadth strong.
   - risk-off: VIX elevated, spreads widening, SPY down, flight to safety.
   - transitional: mixed signals.
2. Classify liquidity_regime: "expansion" / "contraction" / "neutral".
   - expansion: falling yields, dovish Fed, credit flowing, dollar weakening.
   - contraction: rising yields, hawkish Fed, tight credit, dollar strengthening.
3. Set regime_score_adjustment (-10 to +10):
   - +5 to +10 = strong macro tailwind for this specific stock/sector.
   - +1 to +4 = mild tailwind.
   - 0 = neutral.
   - -1 to -4 = mild headwind.
   - -5 to -10 = severe macro headwind (risk-off + contraction + hostile sector).
   This adjustment directly modifies the 0-100 master score for ALL stocks.
4. Score macro_alignment_0_to_10: how well macro supports {ticker} specifically.
5. Also provide score_0_to_10 (overall macro health).
6. Set regime_label: descriptive label (e.g., "Late Cycle Risk-Off").
7. List key positives, negatives, risks. Be concise."""

        # Fall back to a neutral, low-confidence result (with a moderate
        # data-quality flag) when the LLM call fails.
        try:
            result = invoke_structured(llm, MacroRegimeOutput, prompt)
        except Exception as e:
            logger.warning("Macro LLM call failed: %s", e)
            result = MacroRegimeOutput(
                score_0_to_10=5.0, confidence_0_to_1=0.1,
                summary_1_sentence="Macro analysis unavailable",
                data_quality_flags=[
                    DataFlag(field="macro", severity="moderate", message=str(e))
                ],
            )

        # Override with actual fetched data — the numeric fields reflect what
        # was fetched, not whatever values the LLM echoed back.
        result.vix_level = macro_data.get("vix_level")
        result.vix_regime = macro_data.get("vix_regime", "unknown")
        result.ten_year_yield = macro_data.get("ten_year_yield")
        result.dollar_strength = macro_data.get("dollar_strength", "unknown")
        result.credit_spread_direction = macro_data.get(
            "credit_spread_direction", "unknown"
        )
        result.spy_1m_return = spy_perf.get("return_1m")

        # Propagate this agent's data-quality flags into the shared global list.
        flags = [f.model_dump() for f in result.data_quality_flags]
        return {"macro": result.model_dump(), "global_flags": flags}

    return node
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Liquidity
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
def create_liquidity_node(llm):
    """Liquidity analysis node — uses quick LLM."""

    def node(state: Dict[str, Any]) -> Dict[str, Any]:
        """Score overall liquidity favorability for the ticker via the quick LLM."""
        ticker = state["ticker"]
        macro_data = _fetch_macro_data()
        card = state.get("company_card") or {}

        prompt = f"""You are a Liquidity Analyst in a structured equity ranking pipeline.

Ticker: {ticker} | Sector: {card.get('sector', 'Unknown')}

AVAILABLE DATA (source: yfinance macro API):
- VIX: {macro_data.get('vix_level', 'N/A')} (source: yfinance)
- 10Y Yield: {macro_data.get('ten_year_yield', 'N/A')}% (source: yfinance)
- Credit Spreads: {macro_data.get('credit_spread_direction', 'N/A')} (source: yfinance)
- Dollar Strength: {macro_data.get('dollar_strength', 'N/A')} (source: yfinance)

NOTE: If a metric shows 'N/A' or 'unknown', say 'data unavailable' rather than guessing.

INSTRUCTIONS:
1. Assess Fed stance (dovish / neutral / hawkish) based on yield environment.
2. Assess market breadth (strong / moderate / weak).
3. Assess volume profile (above_average / average / below_average).
4. Assess SPY trend (uptrend / downtrend / sideways).
5. Score overall liquidity favorability 0-10 for this stock.
6. Be concise."""

        # Neutral, low-confidence fallback keeps the pipeline running when
        # the LLM call fails.
        try:
            result = invoke_structured(llm, LiquidityOutput, prompt)
        except Exception as e:
            logger.warning("Liquidity LLM call failed: %s", e)
            result = LiquidityOutput(
                score_0_to_10=5.0, confidence_0_to_1=0.1,
                summary_1_sentence="Liquidity analysis unavailable",
            )

        # Propagate this agent's data-quality flags into the shared global list.
        flags = [f.model_dump() for f in result.data_quality_flags]
        return {"liquidity": result.model_dump(), "global_flags": flags}

    return node
|
||||
|
|
|
|||
File diff suppressed because it is too large
Load Diff
|
|
@ -1,418 +1,418 @@
|
|||
"""Tier 3 agents: Bull/Bear debate, Risk assessment, Final decision.
|
||||
|
||||
Only runs on stocks that pass Tier 1 + Tier 2. Uses the deep-thinking LLM
|
||||
for reasoning-heavy tasks (debate, risk, final synthesis).
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import logging
|
||||
from typing import Any, Dict
|
||||
|
||||
from tradingagents.models import (
|
||||
BearCaseOutput,
|
||||
BullCaseOutput,
|
||||
DataFlag,
|
||||
DebateRefereeOutput,
|
||||
FinalDecisionOutput,
|
||||
RiskInvalidationOutput,
|
||||
invoke_structured,
|
||||
)
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def _low_confidence_warnings(state: Dict[str, Any]) -> str:
|
||||
"""Check if any Tier 2 agents have confidence < 0.2 and return warnings."""
|
||||
_TIER2_FIELDS = {
|
||||
"business_quality": "Business Quality",
|
||||
"institutional_flow": "Institutional Flow",
|
||||
"valuation": "Valuation",
|
||||
"entry_timing": "Entry Timing",
|
||||
"earnings_revisions": "Earnings Revisions",
|
||||
"sector_rotation": "Sector Rotation",
|
||||
"backlog": "Backlog / Order Momentum",
|
||||
"crowding": "Narrative Crowding",
|
||||
}
|
||||
warnings = []
|
||||
for field, display_name in _TIER2_FIELDS.items():
|
||||
agent_data = state.get(field) or {}
|
||||
conf = agent_data.get("confidence_0_to_1")
|
||||
if conf is not None and conf < 0.2:
|
||||
warnings.append(
|
||||
f" WARNING: {display_name} has low confidence ({conf:.2f}) — "
|
||||
f"its score may be unreliable (fallback defaults or poor data)"
|
||||
)
|
||||
if warnings:
|
||||
return "\nDATA QUALITY WARNINGS:\n" + "\n".join(warnings) + "\n"
|
||||
return ""
|
||||
|
||||
|
||||
def _summarize_tier2(state: Dict[str, Any]) -> str:
    """Build a compact summary of all Tier 1+2 findings for Tier 3 prompts.

    Every section falls back to an empty dict so a partially populated
    state still renders — missing values print as 'N/A' or '?'.
    """
    # Tier 1 + Tier 2 agent outputs (each may be absent from state).
    card = state.get("company_card") or {}
    macro = state.get("macro") or {}
    liq = state.get("liquidity") or {}
    bq = state.get("business_quality") or {}
    inst = state.get("institutional_flow") or {}
    val = state.get("valuation") or {}
    et = state.get("entry_timing") or {}
    er = state.get("earnings_revisions") or {}
    sr = state.get("sector_rotation") or {}
    bl = state.get("backlog") or {}
    cr = state.get("crowding") or {}
    arch = state.get("archetype") or {}

    # Check for low-confidence Tier 2 agents
    confidence_warnings = _low_confidence_warnings(state)

    lines = [
        # Company identity and headline figures.
        f"Company: {card.get('company_name', '?')} ({card.get('ticker', '?')})",
        f"Sector: {card.get('sector', '?')} | Industry: {card.get('industry', '?')}",
        f"Market Cap: {card.get('market_cap_formatted', 'N/A')}",
        f"Price: ${card.get('current_price', 'N/A')}",
        f"Archetype: {arch.get('archetype', 'N/A')}",
        "",
        f"Master Score: {state.get('master_score', 'N/A')} | Role: {state.get('position_role', 'N/A')}",
        "",
        # One line per agent: numeric score plus its one-sentence summary.
        "AGENT SCORES (0-10):",
        f"  Business Quality: {bq.get('score_0_to_10', 'N/A')} — {bq.get('summary_1_sentence', '')}",
        f"  Macro Alignment: {macro.get('macro_alignment_0_to_10', 'N/A')} — {macro.get('summary_1_sentence', '')}",
        f"  Institutional Flow: {inst.get('score_0_to_10', 'N/A')} — {inst.get('summary_1_sentence', '')}",
        f"  Valuation: {val.get('score_0_to_10', 'N/A')} — {val.get('summary_1_sentence', '')}",
        f"  Entry Timing: {et.get('score_0_to_10', 'N/A')} — {et.get('summary_1_sentence', '')}",
        f"  Earnings Revisions: {er.get('score_0_to_10', 'N/A')} — {er.get('summary_1_sentence', '')}",
        f"  Sector Rotation: {sr.get('score_0_to_10', 'N/A')} — {sr.get('summary_1_sentence', '')}",
        f"  Backlog: {bl.get('score_0_to_10', 'N/A')} — {bl.get('summary_1_sentence', '')}",
        f"  Crowding: {cr.get('score_0_to_10', 'N/A')} — {cr.get('summary_1_sentence', '')}",
        f"  Liquidity: {liq.get('score_0_to_10', 'N/A')} — {liq.get('summary_1_sentence', '')}",
        "",
        # Qualitative verdicts and regime context.
        f"  Macro Regime: {macro.get('regime_label', '?')} | VIX: {macro.get('vix_level', '?')}",
        f"  Risk Appetite: {macro.get('risk_appetite', '?')} | Liquidity Regime: {macro.get('liquidity_regime', '?')}",
        f"  Regime Score Adjustment: {macro.get('regime_score_adjustment', 0):+.1f}",
        f"  Moat: {bq.get('competitive_moat', '?')} | Valuation: {val.get('valuation_verdict', '?')}",
        f"  Smart Money: {inst.get('smart_money_signal', '?')} | Accumulation: {inst.get('accumulation_signal', '?')}",
        f"  Short Trend: {inst.get('short_interest_trend', '?')} | Insider Signal: {inst.get('insider_transaction_signal', '?')}",
        f"  Timing: {et.get('timing_verdict', '?')}",
    ]

    # Append the low-confidence banner so Tier 3 can discount shaky scores.
    if confidence_warnings:
        lines.append("")
        lines.append(confidence_warnings)
        lines.append("Factor these warnings into your analysis — low-confidence scores may not reflect reality.")

    return "\n".join(lines)
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Bull Case
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
def create_bull_case_node(llm):
    """Bull-case researcher node — deep LLM builds the strongest bullish thesis."""

    def node(state: Dict[str, Any]) -> Dict[str, Any]:
        """Generate the bull case for the ticker from the Tier 1+2 summary."""
        ticker = state["ticker"]
        summary = _summarize_tier2(state)

        prompt = f"""You are a Bull Case Researcher. Build the strongest possible bullish thesis for {ticker}.

{summary}

INSTRUCTIONS:
1. Write a concise thesis (2-3 sentences) for why this stock should be bought.
2. List 3-5 specific catalysts that could drive the stock higher.
3. Estimate upside_target (price) and upside_pct from current price.
4. List key assumptions your thesis depends on.
5. List thesis_invalidation_triggers — what would kill the bull case.
6. Set confidence 0-1 for how strong the bull case is.

Attack the investment aggressively. Find every reason to be bullish.
But be honest — don't fabricate catalysts. Use the data above."""

        # Low-confidence placeholder keeps the pipeline running if the
        # LLM call fails.
        try:
            result = invoke_structured(llm, BullCaseOutput, prompt)
        except Exception as e:
            logger.warning("BullCase LLM failed: %s", e)
            result = BullCaseOutput(
                thesis="Bull case analysis unavailable",
                confidence_0_to_1=0.1,
            )

        return {"bull_case": result.model_dump()}

    return node
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Bear Case
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
def create_bear_case_node(llm):
    """Bear-case researcher node — deep LLM builds the strongest bearish thesis."""

    def node(state: Dict[str, Any]) -> Dict[str, Any]:
        """Generate the bear case for the ticker from the Tier 1+2 summary."""
        ticker = state["ticker"]
        summary = _summarize_tier2(state)

        prompt = f"""You are a Bear Case Researcher. Build the strongest possible bearish thesis for {ticker}.

{summary}

INSTRUCTIONS:
1. Write a concise thesis (2-3 sentences) for why this stock should be avoided or sold.
2. List 3-5 specific risks that could drive the stock lower.
3. Estimate downside_target (price) and downside_pct from current price.
4. List key assumptions your bear thesis depends on.
5. List thesis_invalidation_triggers — what would kill the bear case.
6. Set confidence 0-1 for how strong the bear case is.

Be ruthless. Find every vulnerability, every overvaluation, every risk.
But be honest — don't fabricate risks. Use the data above."""

        # Low-confidence placeholder keeps the pipeline running if the
        # LLM call fails.
        try:
            result = invoke_structured(llm, BearCaseOutput, prompt)
        except Exception as e:
            logger.warning("BearCase LLM failed: %s", e)
            result = BearCaseOutput(
                thesis="Bear case analysis unavailable",
                confidence_0_to_1=0.1,
            )

        return {"bear_case": result.model_dump()}

    return node
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Debate Referee
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
def create_debate_node(llm):
    """Referee that evaluates bull vs bear case."""

    def node(state: Dict[str, Any]) -> Dict[str, Any]:
        """Judge the bull vs bear debate and produce a conviction adjustment."""
        ticker = state["ticker"]
        # Prior node outputs; either may be absent or a fallback result.
        bull = state.get("bull_case") or {}
        bear = state.get("bear_case") or {}

        prompt = f"""You are the Debate Referee. Evaluate the bull vs bear case for {ticker}.

BULL CASE (confidence: {bull.get('confidence_0_to_1', 'N/A')}):
Thesis: {bull.get('thesis', 'N/A')}
Catalysts: {', '.join(bull.get('catalysts', []))}
Upside: {bull.get('upside_pct', 'N/A')}%
Invalidation: {', '.join(bull.get('thesis_invalidation_triggers', []))}

BEAR CASE (confidence: {bear.get('confidence_0_to_1', 'N/A')}):
Thesis: {bear.get('thesis', 'N/A')}
Risks: {', '.join(bear.get('risks', []))}
Downside: {bear.get('downside_pct', 'N/A')}%
Invalidation: {', '.join(bear.get('thesis_invalidation_triggers', []))}

MASTER SCORE: {state.get('master_score', 'N/A')} | ROLE: {state.get('position_role', 'N/A')}

INSTRUCTIONS:
1. Declare winner: "bull" or "bear".
2. Score each side 0-10 on argument strength.
3. List key unresolved questions.
4. Set net_conviction_adjustment (-2 to +2) to modify the master score.
   Positive = debate strengthened the bull case. Negative = weakened it.
5. Provide reasoning for your decision."""

        # On LLM failure, fall back to the model's defaults
        # (no-argument DebateRefereeOutput) so the graph can proceed.
        try:
            result = invoke_structured(llm, DebateRefereeOutput, prompt)
        except Exception as e:
            logger.warning("Debate LLM failed: %s", e)
            result = DebateRefereeOutput()

        return {"debate": result.model_dump()}

    return node
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Risk / Invalidation
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
def create_risk_node(llm):
    """Build the Risk / Invalidation node — the final risk gate.

    Args:
        llm: Chat model handed to ``invoke_structured``.

    Returns:
        A graph node that writes ``risk`` and ``global_flags`` to state, and —
        when the model sets ``veto`` — also sets ``hard_veto`` /
        ``hard_veto_reason``, which downstream nodes treat as a kill switch.
    """

    def node(state: Dict[str, Any]) -> Dict[str, Any]:
        ticker = state["ticker"]
        summary = _summarize_tier2(state)
        bull = state.get("bull_case") or {}
        bear = state.get("bear_case") or {}
        debate = state.get("debate") or {}

        prompt = f"""You are the Risk / Invalidation Analyst. Final risk gate for {ticker}.

{summary}

DEBATE OUTCOME: {debate.get('winner', '?')} won
Bull strength: {debate.get('bull_strength_0_to_10', '?')}/10
Bear strength: {debate.get('bear_strength_0_to_10', '?')}/10
Conviction adjustment: {debate.get('net_conviction_adjustment', 0)}

Bear risks: {', '.join(bear.get('risks', []))}
Bull invalidation triggers: {', '.join(bull.get('thesis_invalidation_triggers', []))}

INSTRUCTIONS:
1. Classify overall_risk_level: low / medium / high.
2. Set max_position_size_pct (0-100). Low risk = up to 10%. High risk = max 2%.
3. Suggest stop_loss_pct (distance from entry to stop).
4. List invalidation_triggers — concrete events that should trigger exit.
5. Score overall risk-reward 0-10 (10 = great risk/reward).
6. Set veto=true ONLY if you find impossible/fraudulent data, or risk is so extreme
   that no position should be taken. This is a hard kill switch.
7. Be concise."""

        try:
            result = invoke_structured(llm, RiskInvalidationOutput, prompt)
        except Exception as e:
            # Neutral mid-score fallback so scoring still has an input.
            logger.warning("Risk LLM failed: %s", e)
            result = RiskInvalidationOutput(
                score_0_to_10=5.0, confidence_0_to_1=0.3,
                summary_1_sentence="Risk analysis unavailable",
            )

        # Data-quality flags are accumulated file-wide via the global_flags
        # reducer (operator.add on the state field).
        flags = [f.model_dump() for f in result.data_quality_flags]
        update: Dict[str, Any] = {"risk": result.model_dump(), "global_flags": flags}

        if result.veto:
            # Hard veto propagates to the final-decision node, which forces AVOID.
            update["hard_veto"] = True
            update["hard_veto_reason"] = result.veto_reason

        return update

    return node
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Final Decision (prose generated AFTER all scoring)
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
def create_final_decision_node(llm):
    """Build the Final Decision node: synthesis of all prior tiers.

    The LLM only writes narrative prose; every numeric field (scores, role,
    action, sizing, aggregate confidence) is computed deterministically here
    and overrides whatever the LLM returned, so the final output cannot drift
    from the scoring rules.

    Args:
        llm: Chat model handed to ``invoke_structured``.

    Returns:
        A graph node that writes a ``final_decision`` dict
        (``FinalDecisionOutput.model_dump()``) to state.
    """

    def node(state: Dict[str, Any]) -> Dict[str, Any]:
        ticker = state["ticker"]
        card = state.get("company_card") or {}
        summary = _summarize_tier2(state)

        debate = state.get("debate") or {}
        risk = state.get("risk") or {}
        theme = state.get("theme_substitution") or {}
        replacement = state.get("position_replacement") or {}

        master_score = state.get("master_score", 0)
        adjusted_score = state.get("adjusted_score", 0)
        conviction_adj = debate.get("net_conviction_adjustment", 0)

        # Apply debate conviction adjustment on top of the data-quality-adjusted score.
        final_score = round(adjusted_score + conviction_adj, 2)
        final_role = _role_from_score(final_score)

        # Determine action; a hard veto from the risk agent overrides everything.
        if state.get("hard_veto"):
            action = "AVOID"
            final_role = "Avoid"
            final_score = 0.0
        elif final_score >= 70:
            action = "BUY"
        elif final_score >= 50:
            action = "HOLD"
        else:
            action = "AVOID"

        # Theme/replacement context (only rendered if the theme agent ran).
        theme_lines = ""
        if theme.get("theme_name"):
            theme_lines = (
                f"\nTHEME CONTEXT:"
                f"\n  Theme: {theme.get('theme_name', '?')}"
                f"\n  Best expression: {'Yes' if theme.get('best_expression_of_theme') else 'No'}"
                f"\n  Stronger alternatives: {', '.join(theme.get('stronger_alternatives', [])) or 'None'}"
                f"\n  Score gap vs best: {theme.get('relative_score_gap', 0):.1f}"
            )
        # NOTE(review): replacement flag is appended independently of the theme
        # context — confirm this should not be nested under the theme check.
        if replacement.get("should_replace"):
            theme_lines += (
                f"\n  REPLACEMENT FLAG: Consider {replacement.get('replace_with', '?')} instead"
                f"\n  Reason: {replacement.get('replacement_reason', '')}"
            )

        prompt = f"""You are the Final Decision Synthesizer for {ticker}.

{summary}

DEBATE: {debate.get('winner', '?')} won | Conviction adjustment: {conviction_adj:+.1f}
RISK: {risk.get('overall_risk_level', '?')} | Max position: {risk.get('max_position_size_pct', '?')}%
{theme_lines}

FINAL SCORES:
Master Score: {master_score}
Adjusted Score: {adjusted_score} (after data quality penalties)
Post-Debate Score: {final_score} (after conviction adjustment)
Position Role: {final_role}
Action: {action}

INSTRUCTIONS:
Write a concise narrative (3-5 sentences) that:
1. Summarizes the investment thesis.
2. Highlights the top 2-3 catalysts and top 2-3 risks.
3. States the action ({action}) and position role ({final_role}).
4. Notes what would change the thesis (invalidation triggers).
5. If theme analysis found stronger alternatives, mention them and whether
   this stock is still the best expression of the theme.

Also provide:
- thesis_summary (one sentence)
- key_catalysts (top 3 from bull case)
- key_risks (top 3 from bear case)
- invalidation_triggers (from risk agent)
- position_sizing_pct (from risk agent)
- confidence (average of all agent confidences)"""

        try:
            result = invoke_structured(llm, FinalDecisionOutput, prompt)
        except Exception as e:
            logger.warning("FinalDecision LLM failed: %s", e)
            result = FinalDecisionOutput()

        # Override with computed values (deterministic, not LLM-driven).
        result.ticker = ticker
        result.company_name = card.get("company_name", "")
        result.master_score = master_score
        result.adjusted_score = final_score
        result.position_role = final_role
        result.action = action
        result.risk_level = risk.get("overall_risk_level", "medium")
        result.position_sizing_pct = risk.get("max_position_size_pct", 0)

        # Aggregate confidence = mean of all agent confidences that are present.
        # Use `or {}` (not a dict default) because state fields may hold an
        # explicit None, on which .get() would raise AttributeError.
        agents_with_confidence = [
            (state.get(k) or {}).get("confidence_0_to_1")
            for k in (
                "macro", "liquidity", "business_quality", "institutional_flow",
                "valuation", "entry_timing", "earnings_revisions",
                "sector_rotation", "backlog", "crowding",
            )
        ]
        valid_confs = [c for c in agents_with_confidence if c is not None]
        result.confidence = round(sum(valid_confs) / len(valid_confs), 2) if valid_confs else 0.5

        return {"final_decision": result.model_dump()}

    return node
|
||||
|
||||
|
||||
def _role_from_score(score: float) -> str:
|
||||
if score > 80:
|
||||
return "Core Position"
|
||||
if score > 70:
|
||||
return "Strong Position"
|
||||
if score > 60:
|
||||
return "Tactical / Satellite"
|
||||
if score > 50:
|
||||
return "Watchlist"
|
||||
return "Avoid"
|
||||
"""Tier 3 agents: Bull/Bear debate, Risk assessment, Final decision.
|
||||
|
||||
Only runs on stocks that pass Tier 1 + Tier 2. Uses the deep-thinking LLM
|
||||
for reasoning-heavy tasks (debate, risk, final synthesis).
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import logging
|
||||
from typing import Any, Dict
|
||||
|
||||
from tradingagents.models import (
|
||||
BearCaseOutput,
|
||||
BullCaseOutput,
|
||||
DataFlag,
|
||||
DebateRefereeOutput,
|
||||
FinalDecisionOutput,
|
||||
RiskInvalidationOutput,
|
||||
invoke_structured,
|
||||
)
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def _low_confidence_warnings(state: Dict[str, Any]) -> str:
|
||||
"""Check if any Tier 2 agents have confidence < 0.2 and return warnings."""
|
||||
_TIER2_FIELDS = {
|
||||
"business_quality": "Business Quality",
|
||||
"institutional_flow": "Institutional Flow",
|
||||
"valuation": "Valuation",
|
||||
"entry_timing": "Entry Timing",
|
||||
"earnings_revisions": "Earnings Revisions",
|
||||
"sector_rotation": "Sector Rotation",
|
||||
"backlog": "Backlog / Order Momentum",
|
||||
"crowding": "Narrative Crowding",
|
||||
}
|
||||
warnings = []
|
||||
for field, display_name in _TIER2_FIELDS.items():
|
||||
agent_data = state.get(field) or {}
|
||||
conf = agent_data.get("confidence_0_to_1")
|
||||
if conf is not None and conf < 0.2:
|
||||
warnings.append(
|
||||
f" WARNING: {display_name} has low confidence ({conf:.2f}) — "
|
||||
f"its score may be unreliable (fallback defaults or poor data)"
|
||||
)
|
||||
if warnings:
|
||||
return "\nDATA QUALITY WARNINGS:\n" + "\n".join(warnings) + "\n"
|
||||
return ""
|
||||
|
||||
|
||||
def _summarize_tier2(state: Dict[str, Any]) -> str:
    """Build a compact summary of all Tier 1+2 findings for Tier 3 prompts.

    Args:
        state: Pipeline state; each agent key holds a dict (or None).

    Returns:
        A multi-line plain-text report: company header, master score, one
        line per agent score, key qualitative signals, and (when present)
        low-confidence data-quality warnings.
    """
    # Each field may be absent or explicitly None; normalize to {} so the
    # .get() lookups below never raise.
    card = state.get("company_card") or {}
    macro = state.get("macro") or {}
    liq = state.get("liquidity") or {}
    bq = state.get("business_quality") or {}
    inst = state.get("institutional_flow") or {}
    val = state.get("valuation") or {}
    et = state.get("entry_timing") or {}
    er = state.get("earnings_revisions") or {}
    sr = state.get("sector_rotation") or {}
    bl = state.get("backlog") or {}
    cr = state.get("crowding") or {}
    arch = state.get("archetype") or {}

    # Check for low-confidence Tier 2 agents
    confidence_warnings = _low_confidence_warnings(state)

    lines = [
        f"Company: {card.get('company_name', '?')} ({card.get('ticker', '?')})",
        f"Sector: {card.get('sector', '?')} | Industry: {card.get('industry', '?')}",
        f"Market Cap: {card.get('market_cap_formatted', 'N/A')}",
        f"Price: ${card.get('current_price', 'N/A')}",
        f"Archetype: {arch.get('archetype', 'N/A')}",
        "",
        f"Master Score: {state.get('master_score', 'N/A')} | Role: {state.get('position_role', 'N/A')}",
        "",
        "AGENT SCORES (0-10):",
        f" Business Quality: {bq.get('score_0_to_10', 'N/A')} — {bq.get('summary_1_sentence', '')}",
        f" Macro Alignment: {macro.get('macro_alignment_0_to_10', 'N/A')} — {macro.get('summary_1_sentence', '')}",
        f" Institutional Flow: {inst.get('score_0_to_10', 'N/A')} — {inst.get('summary_1_sentence', '')}",
        f" Valuation: {val.get('score_0_to_10', 'N/A')} — {val.get('summary_1_sentence', '')}",
        f" Entry Timing: {et.get('score_0_to_10', 'N/A')} — {et.get('summary_1_sentence', '')}",
        f" Earnings Revisions: {er.get('score_0_to_10', 'N/A')} — {er.get('summary_1_sentence', '')}",
        f" Sector Rotation: {sr.get('score_0_to_10', 'N/A')} — {sr.get('summary_1_sentence', '')}",
        f" Backlog: {bl.get('score_0_to_10', 'N/A')} — {bl.get('summary_1_sentence', '')}",
        f" Crowding: {cr.get('score_0_to_10', 'N/A')} — {cr.get('summary_1_sentence', '')}",
        f" Liquidity: {liq.get('score_0_to_10', 'N/A')} — {liq.get('summary_1_sentence', '')}",
        "",
        # Qualitative signal lines: regime, moat/valuation verdicts, flow signals.
        f" Macro Regime: {macro.get('regime_label', '?')} | VIX: {macro.get('vix_level', '?')}",
        f" Risk Appetite: {macro.get('risk_appetite', '?')} | Liquidity Regime: {macro.get('liquidity_regime', '?')}",
        f" Regime Score Adjustment: {macro.get('regime_score_adjustment', 0):+.1f}",
        f" Moat: {bq.get('competitive_moat', '?')} | Valuation: {val.get('valuation_verdict', '?')}",
        f" Smart Money: {inst.get('smart_money_signal', '?')} | Accumulation: {inst.get('accumulation_signal', '?')}",
        f" Short Trend: {inst.get('short_interest_trend', '?')} | Insider Signal: {inst.get('insider_transaction_signal', '?')}",
        f" Timing: {et.get('timing_verdict', '?')}",
    ]

    if confidence_warnings:
        # Surface unreliable scores prominently so Tier 3 agents discount them.
        lines.append("")
        lines.append(confidence_warnings)
        lines.append("Factor these warnings into your analysis — low-confidence scores may not reflect reality.")

    return "\n".join(lines)
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Bull Case
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
def create_bull_case_node(llm):
    """Build the Bull Case Researcher node.

    Args:
        llm: Chat model handed to ``invoke_structured`` (deep-thinking tier).

    Returns:
        A graph node that reads the Tier 1+2 summary from state and writes a
        ``bull_case`` dict (``BullCaseOutput.model_dump()``).
    """

    def node(state: Dict[str, Any]) -> Dict[str, Any]:
        ticker = state["ticker"]
        # Compact digest of every Tier 1+2 agent finding, used as prompt context.
        summary = _summarize_tier2(state)

        # Fix: the closing exhortation previously said "Attack the investment
        # aggressively", which is bearish framing and contradicts the bull role.
        prompt = f"""You are a Bull Case Researcher. Build the strongest possible bullish thesis for {ticker}.

{summary}

INSTRUCTIONS:
1. Write a concise thesis (2-3 sentences) for why this stock should be bought.
2. List 3-5 specific catalysts that could drive the stock higher.
3. Estimate upside_target (price) and upside_pct from current price.
4. List key assumptions your thesis depends on.
5. List thesis_invalidation_triggers — what would kill the bull case.
6. Set confidence 0-1 for how strong the bull case is.

Advocate for the investment aggressively. Find every reason to be bullish.
But be honest — don't fabricate catalysts. Use the data above."""

        try:
            result = invoke_structured(llm, BullCaseOutput, prompt)
        except Exception as e:
            # Degrade gracefully with a low-confidence placeholder.
            logger.warning("BullCase LLM failed: %s", e)
            result = BullCaseOutput(
                thesis="Bull case analysis unavailable",
                confidence_0_to_1=0.1,
            )

        return {"bull_case": result.model_dump()}

    return node
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Bear Case
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
def create_bear_case_node(llm):
    """Build the Bear Case Researcher node.

    Args:
        llm: Chat model handed to ``invoke_structured`` (deep-thinking tier).

    Returns:
        A graph node that reads the Tier 1+2 summary from state and writes a
        ``bear_case`` dict (``BearCaseOutput.model_dump()``).
    """

    def node(state: Dict[str, Any]) -> Dict[str, Any]:
        ticker = state["ticker"]
        # Compact digest of every Tier 1+2 agent finding, used as prompt context.
        summary = _summarize_tier2(state)

        prompt = f"""You are a Bear Case Researcher. Build the strongest possible bearish thesis for {ticker}.

{summary}

INSTRUCTIONS:
1. Write a concise thesis (2-3 sentences) for why this stock should be avoided or sold.
2. List 3-5 specific risks that could drive the stock lower.
3. Estimate downside_target (price) and downside_pct from current price.
4. List key assumptions your bear thesis depends on.
5. List thesis_invalidation_triggers — what would kill the bear case.
6. Set confidence 0-1 for how strong the bear case is.

Be ruthless. Find every vulnerability, every overvaluation, every risk.
But be honest — don't fabricate risks. Use the data above."""

        try:
            result = invoke_structured(llm, BearCaseOutput, prompt)
        except Exception as e:
            # Degrade gracefully: a low-confidence placeholder keeps the
            # pipeline moving instead of aborting the whole run.
            logger.warning("BearCase LLM failed: %s", e)
            result = BearCaseOutput(
                thesis="Bear case analysis unavailable",
                confidence_0_to_1=0.1,
            )

        return {"bear_case": result.model_dump()}

    return node
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Debate Referee
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
def create_debate_node(llm):
    """Referee that evaluates bull vs bear case.

    Args:
        llm: Chat model handed to ``invoke_structured``.

    Returns:
        A graph node that reads ``bull_case``/``bear_case`` from state and
        writes a ``debate`` dict (``DebateRefereeOutput.model_dump()``),
        including a net conviction adjustment applied later to the score.
    """

    def node(state: Dict[str, Any]) -> Dict[str, Any]:
        ticker = state["ticker"]
        # Either side may be missing if an upstream node failed; fall back to {}.
        bull = state.get("bull_case") or {}
        bear = state.get("bear_case") or {}

        prompt = f"""You are the Debate Referee. Evaluate the bull vs bear case for {ticker}.

BULL CASE (confidence: {bull.get('confidence_0_to_1', 'N/A')}):
Thesis: {bull.get('thesis', 'N/A')}
Catalysts: {', '.join(bull.get('catalysts', []))}
Upside: {bull.get('upside_pct', 'N/A')}%
Invalidation: {', '.join(bull.get('thesis_invalidation_triggers', []))}

BEAR CASE (confidence: {bear.get('confidence_0_to_1', 'N/A')}):
Thesis: {bear.get('thesis', 'N/A')}
Risks: {', '.join(bear.get('risks', []))}
Downside: {bear.get('downside_pct', 'N/A')}%
Invalidation: {', '.join(bear.get('thesis_invalidation_triggers', []))}

MASTER SCORE: {state.get('master_score', 'N/A')} | ROLE: {state.get('position_role', 'N/A')}

INSTRUCTIONS:
1. Declare winner: "bull" or "bear".
2. Score each side 0-10 on argument strength.
3. List key unresolved questions.
4. Set net_conviction_adjustment (-2 to +2) to modify the master score.
   Positive = debate strengthened the bull case. Negative = weakened it.
5. Provide reasoning for your decision."""

        try:
            result = invoke_structured(llm, DebateRefereeOutput, prompt)
        except Exception as e:
            # Default-constructed output means "no adjustment" downstream.
            logger.warning("Debate LLM failed: %s", e)
            result = DebateRefereeOutput()

        return {"debate": result.model_dump()}

    return node
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Risk / Invalidation
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
def create_risk_node(llm):
    """Build the Risk / Invalidation node — the final risk gate.

    Args:
        llm: Chat model handed to ``invoke_structured``.

    Returns:
        A graph node that writes ``risk`` and ``global_flags`` to state, and —
        when the model sets ``veto`` — also sets ``hard_veto`` /
        ``hard_veto_reason``, which downstream nodes treat as a kill switch.
    """

    def node(state: Dict[str, Any]) -> Dict[str, Any]:
        ticker = state["ticker"]
        summary = _summarize_tier2(state)
        bull = state.get("bull_case") or {}
        bear = state.get("bear_case") or {}
        debate = state.get("debate") or {}

        prompt = f"""You are the Risk / Invalidation Analyst. Final risk gate for {ticker}.

{summary}

DEBATE OUTCOME: {debate.get('winner', '?')} won
Bull strength: {debate.get('bull_strength_0_to_10', '?')}/10
Bear strength: {debate.get('bear_strength_0_to_10', '?')}/10
Conviction adjustment: {debate.get('net_conviction_adjustment', 0)}

Bear risks: {', '.join(bear.get('risks', []))}
Bull invalidation triggers: {', '.join(bull.get('thesis_invalidation_triggers', []))}

INSTRUCTIONS:
1. Classify overall_risk_level: low / medium / high.
2. Set max_position_size_pct (0-100). Low risk = up to 10%. High risk = max 2%.
3. Suggest stop_loss_pct (distance from entry to stop).
4. List invalidation_triggers — concrete events that should trigger exit.
5. Score overall risk-reward 0-10 (10 = great risk/reward).
6. Set veto=true ONLY if you find impossible/fraudulent data, or risk is so extreme
   that no position should be taken. This is a hard kill switch.
7. Be concise."""

        try:
            result = invoke_structured(llm, RiskInvalidationOutput, prompt)
        except Exception as e:
            # Neutral mid-score fallback so scoring still has an input.
            logger.warning("Risk LLM failed: %s", e)
            result = RiskInvalidationOutput(
                score_0_to_10=5.0, confidence_0_to_1=0.3,
                summary_1_sentence="Risk analysis unavailable",
            )

        # Data-quality flags are accumulated file-wide via the global_flags
        # reducer (operator.add on the state field).
        flags = [f.model_dump() for f in result.data_quality_flags]
        update: Dict[str, Any] = {"risk": result.model_dump(), "global_flags": flags}

        if result.veto:
            # Hard veto propagates to the final-decision node, which forces AVOID.
            update["hard_veto"] = True
            update["hard_veto_reason"] = result.veto_reason

        return update

    return node
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Final Decision (prose generated AFTER all scoring)
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
def create_final_decision_node(llm):
    """Build the Final Decision node: synthesis of all prior tiers.

    The LLM only writes narrative prose; every numeric field (scores, role,
    action, sizing, aggregate confidence) is computed deterministically here
    and overrides whatever the LLM returned, so the final output cannot drift
    from the scoring rules.

    Args:
        llm: Chat model handed to ``invoke_structured``.

    Returns:
        A graph node that writes a ``final_decision`` dict
        (``FinalDecisionOutput.model_dump()``) to state.
    """

    def node(state: Dict[str, Any]) -> Dict[str, Any]:
        ticker = state["ticker"]
        card = state.get("company_card") or {}
        summary = _summarize_tier2(state)

        debate = state.get("debate") or {}
        risk = state.get("risk") or {}
        theme = state.get("theme_substitution") or {}
        replacement = state.get("position_replacement") or {}

        master_score = state.get("master_score", 0)
        adjusted_score = state.get("adjusted_score", 0)
        conviction_adj = debate.get("net_conviction_adjustment", 0)

        # Apply debate conviction adjustment on top of the data-quality-adjusted score.
        final_score = round(adjusted_score + conviction_adj, 2)
        final_role = _role_from_score(final_score)

        # Determine action; a hard veto from the risk agent overrides everything.
        if state.get("hard_veto"):
            action = "AVOID"
            final_role = "Avoid"
            final_score = 0.0
        elif final_score >= 70:
            action = "BUY"
        elif final_score >= 50:
            action = "HOLD"
        else:
            action = "AVOID"

        # Theme/replacement context (only rendered if the theme agent ran).
        theme_lines = ""
        if theme.get("theme_name"):
            theme_lines = (
                f"\nTHEME CONTEXT:"
                f"\n  Theme: {theme.get('theme_name', '?')}"
                f"\n  Best expression: {'Yes' if theme.get('best_expression_of_theme') else 'No'}"
                f"\n  Stronger alternatives: {', '.join(theme.get('stronger_alternatives', [])) or 'None'}"
                f"\n  Score gap vs best: {theme.get('relative_score_gap', 0):.1f}"
            )
        # NOTE(review): replacement flag is appended independently of the theme
        # context — confirm this should not be nested under the theme check.
        if replacement.get("should_replace"):
            theme_lines += (
                f"\n  REPLACEMENT FLAG: Consider {replacement.get('replace_with', '?')} instead"
                f"\n  Reason: {replacement.get('replacement_reason', '')}"
            )

        prompt = f"""You are the Final Decision Synthesizer for {ticker}.

{summary}

DEBATE: {debate.get('winner', '?')} won | Conviction adjustment: {conviction_adj:+.1f}
RISK: {risk.get('overall_risk_level', '?')} | Max position: {risk.get('max_position_size_pct', '?')}%
{theme_lines}

FINAL SCORES:
Master Score: {master_score}
Adjusted Score: {adjusted_score} (after data quality penalties)
Post-Debate Score: {final_score} (after conviction adjustment)
Position Role: {final_role}
Action: {action}

INSTRUCTIONS:
Write a concise narrative (3-5 sentences) that:
1. Summarizes the investment thesis.
2. Highlights the top 2-3 catalysts and top 2-3 risks.
3. States the action ({action}) and position role ({final_role}).
4. Notes what would change the thesis (invalidation triggers).
5. If theme analysis found stronger alternatives, mention them and whether
   this stock is still the best expression of the theme.

Also provide:
- thesis_summary (one sentence)
- key_catalysts (top 3 from bull case)
- key_risks (top 3 from bear case)
- invalidation_triggers (from risk agent)
- position_sizing_pct (from risk agent)
- confidence (average of all agent confidences)"""

        try:
            result = invoke_structured(llm, FinalDecisionOutput, prompt)
        except Exception as e:
            logger.warning("FinalDecision LLM failed: %s", e)
            result = FinalDecisionOutput()

        # Override with computed values (deterministic, not LLM-driven).
        result.ticker = ticker
        result.company_name = card.get("company_name", "")
        result.master_score = master_score
        result.adjusted_score = final_score
        result.position_role = final_role
        result.action = action
        result.risk_level = risk.get("overall_risk_level", "medium")
        result.position_sizing_pct = risk.get("max_position_size_pct", 0)

        # Aggregate confidence = mean of all agent confidences that are present.
        # Use `or {}` (not a dict default) because state fields may hold an
        # explicit None, on which .get() would raise AttributeError.
        agents_with_confidence = [
            (state.get(k) or {}).get("confidence_0_to_1")
            for k in (
                "macro", "liquidity", "business_quality", "institutional_flow",
                "valuation", "entry_timing", "earnings_revisions",
                "sector_rotation", "backlog", "crowding",
            )
        ]
        valid_confs = [c for c in agents_with_confidence if c is not None]
        result.confidence = round(sum(valid_confs) / len(valid_confs), 2) if valid_confs else 0.5

        return {"final_decision": result.model_dump()}

    return node
|
||||
|
||||
|
||||
def _role_from_score(score: float) -> str:
|
||||
if score > 80:
|
||||
return "Core Position"
|
||||
if score > 70:
|
||||
return "Strong Position"
|
||||
if score > 60:
|
||||
return "Tactical / Satellite"
|
||||
if score > 50:
|
||||
return "Watchlist"
|
||||
return "Avoid"
|
||||
|
|
|
|||
|
|
@ -1,46 +1,46 @@
|
|||
import functools
|
||||
import time
|
||||
import json
|
||||
|
||||
|
||||
def create_trader(llm, memory):
    """Build the Trader node: turns the analysts' investment plan into a
    concrete BUY/HOLD/SELL proposal.

    Args:
        llm: Chat model with an ``invoke(messages)`` API returning an object
            with a ``.content`` attribute.
        memory: Store with ``get_memories(situation, n_matches)`` returning
            dicts that contain a ``"recommendation"`` key.

    Returns:
        A node callable (``name`` pre-bound to ``"Trader"``) that writes
        ``trader_investment_plan`` and ``sender`` to state.
    """

    def trader_node(state, name):
        company_name = state["company_of_interest"]
        investment_plan = state["investment_plan"]
        market_research_report = state["market_report"]
        sentiment_report = state["sentiment_report"]
        news_report = state["news_report"]
        fundamentals_report = state["fundamentals_report"]

        # Retrieve lessons from the most similar past situations to ground
        # the decision in prior outcomes.
        curr_situation = f"{market_research_report}\n\n{sentiment_report}\n\n{news_report}\n\n{fundamentals_report}"
        past_memories = memory.get_memories(curr_situation, n_matches=2)

        if past_memories:
            past_memory_str = "".join(rec["recommendation"] + "\n\n" for rec in past_memories)
        else:
            past_memory_str = "No past memories found."

        context = {
            "role": "user",
            "content": f"Based on a comprehensive analysis by a team of analysts, here is an investment plan tailored for {company_name}. This plan incorporates insights from current technical market trends, macroeconomic indicators, and social media sentiment. Use this plan as a foundation for evaluating your next trading decision.\n\nProposed Investment Plan: {investment_plan}\n\nLeverage these insights to make an informed and strategic decision.",
        }

        messages = [
            {
                "role": "system",
                # Fix: system prompt previously read "Here is some reflections
                # from similar situatiosn" (typo + agreement error).
                "content": f"""You are a trading agent analyzing market data to make investment decisions. Based on your analysis, provide a specific recommendation to buy, sell, or hold. End with a firm decision and always conclude your response with 'FINAL TRANSACTION PROPOSAL: **BUY/HOLD/SELL**' to confirm your recommendation. Do not forget to utilize lessons from past decisions to learn from your mistakes. Here are some reflections from similar situations you traded in and the lessons learned: {past_memory_str}""",
            },
            context,
        ]

        result = llm.invoke(messages)

        return {
            "messages": [result],
            "trader_investment_plan": result.content,
            "sender": name,
        }

    return functools.partial(trader_node, name="Trader")
|
||||
import functools
|
||||
import time
|
||||
import json
|
||||
|
||||
|
||||
def create_trader(llm, memory):
    """Build the Trader node: turns the analysts' investment plan into a
    concrete BUY/HOLD/SELL proposal.

    Args:
        llm: Chat model with an ``invoke(messages)`` API returning an object
            with a ``.content`` attribute.
        memory: Store with ``get_memories(situation, n_matches)`` returning
            dicts that contain a ``"recommendation"`` key.

    Returns:
        A node callable (``name`` pre-bound to ``"Trader"``) that writes
        ``trader_investment_plan`` and ``sender`` to state.
    """

    def trader_node(state, name):
        company_name = state["company_of_interest"]
        investment_plan = state["investment_plan"]
        market_research_report = state["market_report"]
        sentiment_report = state["sentiment_report"]
        news_report = state["news_report"]
        fundamentals_report = state["fundamentals_report"]

        # Retrieve lessons from the most similar past situations to ground
        # the decision in prior outcomes.
        curr_situation = f"{market_research_report}\n\n{sentiment_report}\n\n{news_report}\n\n{fundamentals_report}"
        past_memories = memory.get_memories(curr_situation, n_matches=2)

        if past_memories:
            past_memory_str = "".join(rec["recommendation"] + "\n\n" for rec in past_memories)
        else:
            past_memory_str = "No past memories found."

        context = {
            "role": "user",
            "content": f"Based on a comprehensive analysis by a team of analysts, here is an investment plan tailored for {company_name}. This plan incorporates insights from current technical market trends, macroeconomic indicators, and social media sentiment. Use this plan as a foundation for evaluating your next trading decision.\n\nProposed Investment Plan: {investment_plan}\n\nLeverage these insights to make an informed and strategic decision.",
        }

        messages = [
            {
                "role": "system",
                # Fix: system prompt previously read "Here is some reflections
                # from similar situatiosn" (typo + agreement error).
                "content": f"""You are a trading agent analyzing market data to make investment decisions. Based on your analysis, provide a specific recommendation to buy, sell, or hold. End with a firm decision and always conclude your response with 'FINAL TRANSACTION PROPOSAL: **BUY/HOLD/SELL**' to confirm your recommendation. Do not forget to utilize lessons from past decisions to learn from your mistakes. Here are some reflections from similar situations you traded in and the lessons learned: {past_memory_str}""",
            },
            context,
        ]

        result = llm.invoke(messages)

        return {
            "messages": [result],
            "trader_investment_plan": result.content,
            "sender": name,
        }

    return functools.partial(trader_node, name="Trader")
|
||||
|
|
|
|||
|
|
@ -1,109 +1,109 @@
|
|||
"""State definitions for the TradingAgents pipeline.
|
||||
|
||||
PipelineState is the new structured state used by the equity ranking engine.
|
||||
Legacy state types are preserved for backward compatibility.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import operator
|
||||
from typing import Annotated, Optional, Sequence
|
||||
|
||||
from typing_extensions import TypedDict
|
||||
|
||||
from langchain_openai import ChatOpenAI
|
||||
from langgraph.graph import MessagesState
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# New structured pipeline state
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
class PipelineState(TypedDict):
    """Shared state for the structured equity ranking pipeline.

    Each agent writes its output as a dict (Pydantic .model_dump()).
    The scoring node computes master_score/adjusted_score deterministically.
    global_flags uses operator.add to accumulate across all agents.
    """
    # Run identity: which ticker is being evaluated, as of which date.
    ticker: str
    trade_date: str

    # Tier 1 -- each entry is one agent's .model_dump() dict; None until that agent runs.
    validation: Optional[dict]
    company_card: Optional[dict]
    macro: Optional[dict]
    liquidity: Optional[dict]

    # Tier 2
    sector_rotation: Optional[dict]
    business_quality: Optional[dict]
    institutional_flow: Optional[dict]
    valuation: Optional[dict]
    entry_timing: Optional[dict]
    earnings_revisions: Optional[dict]
    backlog: Optional[dict]
    crowding: Optional[dict]
    archetype: Optional[dict]

    # Scoring (deterministic) -- written by the scoring node, not an LLM.
    master_score: Optional[float]
    adjusted_score: Optional[float]
    position_role: Optional[str]

    # Portfolio-level
    theme_substitution: Optional[dict]
    position_replacement: Optional[dict]

    # Tier 3
    bull_case: Optional[dict]
    bear_case: Optional[dict]
    debate: Optional[dict]
    risk: Optional[dict]
    final_decision: Optional[dict]

    # Control
    hard_veto: bool
    hard_veto_reason: Optional[str]
    # Reducer annotation: lists emitted by (possibly parallel) agents are
    # concatenated with operator.add rather than overwritten.
    global_flags: Annotated[list, operator.add]
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Legacy state types (preserved for backward compatibility)
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
class InvestDebateState(TypedDict):
    """Running transcript and verdict of the bull-vs-bear investment debate (legacy)."""
    bull_history: Annotated[str, "Bullish Conversation history"]
    bear_history: Annotated[str, "Bearish Conversation history"]
    # Combined transcript of both sides, in speaking order.
    history: Annotated[str, "Conversation history"]
    current_response: Annotated[str, "Latest response"]
    judge_decision: Annotated[str, "Final judge decision"]
    # Number of turns taken so far; used to decide when the debate ends.
    count: Annotated[int, "Length of the current conversation"]
|
||||
|
||||
|
||||
class RiskDebateState(TypedDict):
    """Running transcript of the three-way risk debate (aggressive/conservative/neutral, legacy)."""
    aggressive_history: Annotated[str, "Aggressive Agent's Conversation history"]
    conservative_history: Annotated[str, "Conservative Agent's Conversation history"]
    neutral_history: Annotated[str, "Neutral Agent's Conversation history"]
    # Combined transcript of all three analysts.
    history: Annotated[str, "Conversation history"]
    # Used to round-robin between the three analysts.
    latest_speaker: Annotated[str, "Analyst that spoke last"]
    current_aggressive_response: Annotated[str, "Latest response by the aggressive analyst"]
    current_conservative_response: Annotated[str, "Latest response by the conservative analyst"]
    current_neutral_response: Annotated[str, "Latest response by the neutral analyst"]
    judge_decision: Annotated[str, "Judge's decision"]
    # Number of turns taken so far; used to decide when the debate ends.
    count: Annotated[int, "Length of the current conversation"]
|
||||
|
||||
|
||||
class AgentState(MessagesState):
    """Legacy top-level graph state; extends MessagesState with per-run report fields.

    MessagesState supplies the running `messages` list; everything below is
    filled in progressively as analysts, researchers, trader, and risk
    managers execute.
    """
    company_of_interest: Annotated[str, "Company that we are interested in trading"]
    trade_date: Annotated[str, "What date we are trading at"]
    sender: Annotated[str, "Agent that sent this message"]
    # Analyst-stage reports (one per analyst role).
    market_report: Annotated[str, "Report from the Market Analyst"]
    sentiment_report: Annotated[str, "Report from the Social Media Analyst"]
    news_report: Annotated[str, "Report from the News Researcher"]
    fundamentals_report: Annotated[str, "Report from the Fundamentals Researcher"]
    # Research-stage debate and its output plan.
    investment_debate_state: Annotated[InvestDebateState, "Current state of the investment debate"]
    investment_plan: Annotated[str, "Plan generated by the Analyst"]
    trader_investment_plan: Annotated[str, "Plan generated by the Trader"]
    # Risk-stage debate and the final verdict.
    risk_debate_state: Annotated[RiskDebateState, "Current state of the risk debate"]
    final_trade_decision: Annotated[str, "Final decision made by the Risk Analysts"]
|
||||
"""State definitions for the TradingAgents pipeline.
|
||||
|
||||
PipelineState is the new structured state used by the equity ranking engine.
|
||||
Legacy state types are preserved for backward compatibility.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import operator
|
||||
from typing import Annotated, Optional, Sequence
|
||||
|
||||
from typing_extensions import TypedDict
|
||||
|
||||
from langchain_openai import ChatOpenAI
|
||||
from langgraph.graph import MessagesState
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# New structured pipeline state
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
class PipelineState(TypedDict):
    """Shared state for the structured equity ranking pipeline.

    Each agent writes its output as a dict (Pydantic .model_dump()).
    The scoring node computes master_score/adjusted_score deterministically.
    global_flags uses operator.add to accumulate across all agents.
    """
    # Run identity: which ticker is being evaluated, as of which date.
    ticker: str
    trade_date: str

    # Tier 1 -- each entry is one agent's .model_dump() dict; None until that agent runs.
    validation: Optional[dict]
    company_card: Optional[dict]
    macro: Optional[dict]
    liquidity: Optional[dict]

    # Tier 2
    sector_rotation: Optional[dict]
    business_quality: Optional[dict]
    institutional_flow: Optional[dict]
    valuation: Optional[dict]
    entry_timing: Optional[dict]
    earnings_revisions: Optional[dict]
    backlog: Optional[dict]
    crowding: Optional[dict]
    archetype: Optional[dict]

    # Scoring (deterministic) -- written by the scoring node, not an LLM.
    master_score: Optional[float]
    adjusted_score: Optional[float]
    position_role: Optional[str]

    # Portfolio-level
    theme_substitution: Optional[dict]
    position_replacement: Optional[dict]

    # Tier 3
    bull_case: Optional[dict]
    bear_case: Optional[dict]
    debate: Optional[dict]
    risk: Optional[dict]
    final_decision: Optional[dict]

    # Control
    hard_veto: bool
    hard_veto_reason: Optional[str]
    # Reducer annotation: lists emitted by (possibly parallel) agents are
    # concatenated with operator.add rather than overwritten.
    global_flags: Annotated[list, operator.add]
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Legacy state types (preserved for backward compatibility)
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
class InvestDebateState(TypedDict):
    """Running transcript and verdict of the bull-vs-bear investment debate (legacy)."""
    bull_history: Annotated[str, "Bullish Conversation history"]
    bear_history: Annotated[str, "Bearish Conversation history"]
    # Combined transcript of both sides, in speaking order.
    history: Annotated[str, "Conversation history"]
    current_response: Annotated[str, "Latest response"]
    judge_decision: Annotated[str, "Final judge decision"]
    # Number of turns taken so far; used to decide when the debate ends.
    count: Annotated[int, "Length of the current conversation"]
|
||||
|
||||
|
||||
class RiskDebateState(TypedDict):
    """Running transcript of the three-way risk debate (aggressive/conservative/neutral, legacy)."""
    aggressive_history: Annotated[str, "Aggressive Agent's Conversation history"]
    conservative_history: Annotated[str, "Conservative Agent's Conversation history"]
    neutral_history: Annotated[str, "Neutral Agent's Conversation history"]
    # Combined transcript of all three analysts.
    history: Annotated[str, "Conversation history"]
    # Used to round-robin between the three analysts.
    latest_speaker: Annotated[str, "Analyst that spoke last"]
    current_aggressive_response: Annotated[str, "Latest response by the aggressive analyst"]
    current_conservative_response: Annotated[str, "Latest response by the conservative analyst"]
    current_neutral_response: Annotated[str, "Latest response by the neutral analyst"]
    judge_decision: Annotated[str, "Judge's decision"]
    # Number of turns taken so far; used to decide when the debate ends.
    count: Annotated[int, "Length of the current conversation"]
|
||||
|
||||
|
||||
class AgentState(MessagesState):
    """Legacy top-level graph state; extends MessagesState with per-run report fields.

    MessagesState supplies the running `messages` list; everything below is
    filled in progressively as analysts, researchers, trader, and risk
    managers execute.
    """
    company_of_interest: Annotated[str, "Company that we are interested in trading"]
    trade_date: Annotated[str, "What date we are trading at"]
    sender: Annotated[str, "Agent that sent this message"]
    # Analyst-stage reports (one per analyst role).
    market_report: Annotated[str, "Report from the Market Analyst"]
    sentiment_report: Annotated[str, "Report from the Social Media Analyst"]
    news_report: Annotated[str, "Report from the News Researcher"]
    fundamentals_report: Annotated[str, "Report from the Fundamentals Researcher"]
    # Research-stage debate and its output plan.
    investment_debate_state: Annotated[InvestDebateState, "Current state of the investment debate"]
    investment_plan: Annotated[str, "Plan generated by the Analyst"]
    trader_investment_plan: Annotated[str, "Plan generated by the Trader"]
    # Risk-stage debate and the final verdict.
    risk_debate_state: Annotated[RiskDebateState, "Current state of the risk debate"]
    final_trade_decision: Annotated[str, "Final decision made by the Risk Analysts"]
|
||||
|
|
|
|||
|
|
@ -1,38 +1,38 @@
|
|||
from langchain_core.messages import HumanMessage, RemoveMessage
|
||||
|
||||
# Import tools from separate utility files
|
||||
from tradingagents.agents.utils.core_stock_tools import (
|
||||
get_stock_data
|
||||
)
|
||||
from tradingagents.agents.utils.technical_indicators_tools import (
|
||||
get_indicators
|
||||
)
|
||||
from tradingagents.agents.utils.fundamental_data_tools import (
|
||||
get_fundamentals,
|
||||
get_balance_sheet,
|
||||
get_cashflow,
|
||||
get_income_statement
|
||||
)
|
||||
from tradingagents.agents.utils.news_data_tools import (
|
||||
get_news,
|
||||
get_insider_transactions,
|
||||
get_global_news
|
||||
)
|
||||
|
||||
def create_msg_delete():
    """Build a graph node that wipes the running message history.

    The returned callable emits a RemoveMessage for every message currently
    in state, then appends a single placeholder HumanMessage so providers
    that reject an empty history (e.g. Anthropic) still receive one message.
    """
    def delete_messages(state):
        """Clear messages and add placeholder for Anthropic compatibility"""
        operations = [RemoveMessage(id=msg.id) for msg in state["messages"]]
        operations.append(HumanMessage(content="Continue"))
        return {"messages": operations}

    return delete_messages
|
||||
|
||||
|
||||
from langchain_core.messages import HumanMessage, RemoveMessage
|
||||
|
||||
# Import tools from separate utility files
|
||||
from tradingagents.agents.utils.core_stock_tools import (
|
||||
get_stock_data
|
||||
)
|
||||
from tradingagents.agents.utils.technical_indicators_tools import (
|
||||
get_indicators
|
||||
)
|
||||
from tradingagents.agents.utils.fundamental_data_tools import (
|
||||
get_fundamentals,
|
||||
get_balance_sheet,
|
||||
get_cashflow,
|
||||
get_income_statement
|
||||
)
|
||||
from tradingagents.agents.utils.news_data_tools import (
|
||||
get_news,
|
||||
get_insider_transactions,
|
||||
get_global_news
|
||||
)
|
||||
|
||||
def create_msg_delete():
    """Build a graph node that wipes the running message history.

    The returned callable emits a RemoveMessage for every message currently
    in state, then appends a single placeholder HumanMessage so providers
    that reject an empty history (e.g. Anthropic) still receive one message.
    """
    def delete_messages(state):
        """Clear messages and add placeholder for Anthropic compatibility"""
        operations = [RemoveMessage(id=msg.id) for msg in state["messages"]]
        operations.append(HumanMessage(content="Continue"))
        return {"messages": operations}

    return delete_messages
|
||||
|
||||
|
||||
|
||||
|
|
@ -1,22 +1,22 @@
|
|||
from langchain_core.tools import tool
|
||||
from typing import Annotated
|
||||
from tradingagents.dataflows.interface import route_to_vendor
|
||||
|
||||
|
||||
# NOTE: the docstring below presumably becomes the tool description the LLM
# sees (langchain @tool convention) -- kept verbatim for that reason.
@tool
def get_stock_data(
    symbol: Annotated[str, "ticker symbol of the company"],
    start_date: Annotated[str, "Start date in yyyy-mm-dd format"],
    end_date: Annotated[str, "End date in yyyy-mm-dd format"],
) -> str:
    """
    Retrieve stock price data (OHLCV) for a given ticker symbol.
    Uses the configured core_stock_apis vendor.
    Args:
        symbol (str): Ticker symbol of the company, e.g. AAPL, TSM
        start_date (str): Start date in yyyy-mm-dd format
        end_date (str): End date in yyyy-mm-dd format
    Returns:
        str: A formatted dataframe containing the stock price data for the specified ticker symbol in the specified date range.
    """
    # Thin wrapper: route_to_vendor dispatches by method name to whichever
    # data vendor the dataflows configuration selects.
    return route_to_vendor("get_stock_data", symbol, start_date, end_date)
|
||||
from langchain_core.tools import tool
|
||||
from typing import Annotated
|
||||
from tradingagents.dataflows.interface import route_to_vendor
|
||||
|
||||
|
||||
# NOTE: the docstring below presumably becomes the tool description the LLM
# sees (langchain @tool convention) -- kept verbatim for that reason.
@tool
def get_stock_data(
    symbol: Annotated[str, "ticker symbol of the company"],
    start_date: Annotated[str, "Start date in yyyy-mm-dd format"],
    end_date: Annotated[str, "End date in yyyy-mm-dd format"],
) -> str:
    """
    Retrieve stock price data (OHLCV) for a given ticker symbol.
    Uses the configured core_stock_apis vendor.
    Args:
        symbol (str): Ticker symbol of the company, e.g. AAPL, TSM
        start_date (str): Start date in yyyy-mm-dd format
        end_date (str): End date in yyyy-mm-dd format
    Returns:
        str: A formatted dataframe containing the stock price data for the specified ticker symbol in the specified date range.
    """
    # Thin wrapper: route_to_vendor dispatches by method name to whichever
    # data vendor the dataflows configuration selects.
    return route_to_vendor("get_stock_data", symbol, start_date, end_date)
|
||||
|
|
|
|||
|
|
@ -1,77 +1,77 @@
|
|||
from langchain_core.tools import tool
|
||||
from typing import Annotated
|
||||
from tradingagents.dataflows.interface import route_to_vendor
|
||||
|
||||
|
||||
# Thin vendor-dispatch wrapper; docstring doubles as the LLM-facing tool
# description (langchain @tool convention), so it is kept verbatim.
@tool
def get_fundamentals(
    ticker: Annotated[str, "ticker symbol"],
    curr_date: Annotated[str, "current date you are trading at, yyyy-mm-dd"],
) -> str:
    """
    Retrieve comprehensive fundamental data for a given ticker symbol.
    Uses the configured fundamental_data vendor.
    Args:
        ticker (str): Ticker symbol of the company
        curr_date (str): Current date you are trading at, yyyy-mm-dd
    Returns:
        str: A formatted report containing comprehensive fundamental data
    """
    return route_to_vendor("get_fundamentals", ticker, curr_date)
|
||||
|
||||
|
||||
# Thin vendor-dispatch wrapper; docstring doubles as the LLM-facing tool
# description (langchain @tool convention), so it is kept verbatim.
@tool
def get_balance_sheet(
    ticker: Annotated[str, "ticker symbol"],
    freq: Annotated[str, "reporting frequency: annual/quarterly"] = "quarterly",
    # NOTE(review): default None conflicts with the plain str annotation --
    # consider Annotated[str | None, ...]; verify impact on tool-schema generation.
    curr_date: Annotated[str, "current date you are trading at, yyyy-mm-dd"] = None,
) -> str:
    """
    Retrieve balance sheet data for a given ticker symbol.
    Uses the configured fundamental_data vendor.
    Args:
        ticker (str): Ticker symbol of the company
        freq (str): Reporting frequency: annual/quarterly (default quarterly)
        curr_date (str): Current date you are trading at, yyyy-mm-dd
    Returns:
        str: A formatted report containing balance sheet data
    """
    return route_to_vendor("get_balance_sheet", ticker, freq, curr_date)
|
||||
|
||||
|
||||
# Thin vendor-dispatch wrapper; docstring doubles as the LLM-facing tool
# description (langchain @tool convention), so it is kept verbatim.
@tool
def get_cashflow(
    ticker: Annotated[str, "ticker symbol"],
    freq: Annotated[str, "reporting frequency: annual/quarterly"] = "quarterly",
    # NOTE(review): default None conflicts with the plain str annotation --
    # consider Annotated[str | None, ...]; verify impact on tool-schema generation.
    curr_date: Annotated[str, "current date you are trading at, yyyy-mm-dd"] = None,
) -> str:
    """
    Retrieve cash flow statement data for a given ticker symbol.
    Uses the configured fundamental_data vendor.
    Args:
        ticker (str): Ticker symbol of the company
        freq (str): Reporting frequency: annual/quarterly (default quarterly)
        curr_date (str): Current date you are trading at, yyyy-mm-dd
    Returns:
        str: A formatted report containing cash flow statement data
    """
    return route_to_vendor("get_cashflow", ticker, freq, curr_date)
|
||||
|
||||
|
||||
@tool
|
||||
def get_income_statement(
|
||||
ticker: Annotated[str, "ticker symbol"],
|
||||
freq: Annotated[str, "reporting frequency: annual/quarterly"] = "quarterly",
|
||||
curr_date: Annotated[str, "current date you are trading at, yyyy-mm-dd"] = None,
|
||||
) -> str:
|
||||
"""
|
||||
Retrieve income statement data for a given ticker symbol.
|
||||
Uses the configured fundamental_data vendor.
|
||||
Args:
|
||||
ticker (str): Ticker symbol of the company
|
||||
freq (str): Reporting frequency: annual/quarterly (default quarterly)
|
||||
curr_date (str): Current date you are trading at, yyyy-mm-dd
|
||||
Returns:
|
||||
str: A formatted report containing income statement data
|
||||
"""
|
||||
from langchain_core.tools import tool
|
||||
from typing import Annotated
|
||||
from tradingagents.dataflows.interface import route_to_vendor
|
||||
|
||||
|
||||
# Thin vendor-dispatch wrapper; docstring doubles as the LLM-facing tool
# description (langchain @tool convention), so it is kept verbatim.
@tool
def get_fundamentals(
    ticker: Annotated[str, "ticker symbol"],
    curr_date: Annotated[str, "current date you are trading at, yyyy-mm-dd"],
) -> str:
    """
    Retrieve comprehensive fundamental data for a given ticker symbol.
    Uses the configured fundamental_data vendor.
    Args:
        ticker (str): Ticker symbol of the company
        curr_date (str): Current date you are trading at, yyyy-mm-dd
    Returns:
        str: A formatted report containing comprehensive fundamental data
    """
    return route_to_vendor("get_fundamentals", ticker, curr_date)
|
||||
|
||||
|
||||
# Thin vendor-dispatch wrapper; docstring doubles as the LLM-facing tool
# description (langchain @tool convention), so it is kept verbatim.
@tool
def get_balance_sheet(
    ticker: Annotated[str, "ticker symbol"],
    freq: Annotated[str, "reporting frequency: annual/quarterly"] = "quarterly",
    # NOTE(review): default None conflicts with the plain str annotation --
    # consider Annotated[str | None, ...]; verify impact on tool-schema generation.
    curr_date: Annotated[str, "current date you are trading at, yyyy-mm-dd"] = None,
) -> str:
    """
    Retrieve balance sheet data for a given ticker symbol.
    Uses the configured fundamental_data vendor.
    Args:
        ticker (str): Ticker symbol of the company
        freq (str): Reporting frequency: annual/quarterly (default quarterly)
        curr_date (str): Current date you are trading at, yyyy-mm-dd
    Returns:
        str: A formatted report containing balance sheet data
    """
    return route_to_vendor("get_balance_sheet", ticker, freq, curr_date)
|
||||
|
||||
|
||||
# Thin vendor-dispatch wrapper; docstring doubles as the LLM-facing tool
# description (langchain @tool convention), so it is kept verbatim.
@tool
def get_cashflow(
    ticker: Annotated[str, "ticker symbol"],
    freq: Annotated[str, "reporting frequency: annual/quarterly"] = "quarterly",
    # NOTE(review): default None conflicts with the plain str annotation --
    # consider Annotated[str | None, ...]; verify impact on tool-schema generation.
    curr_date: Annotated[str, "current date you are trading at, yyyy-mm-dd"] = None,
) -> str:
    """
    Retrieve cash flow statement data for a given ticker symbol.
    Uses the configured fundamental_data vendor.
    Args:
        ticker (str): Ticker symbol of the company
        freq (str): Reporting frequency: annual/quarterly (default quarterly)
        curr_date (str): Current date you are trading at, yyyy-mm-dd
    Returns:
        str: A formatted report containing cash flow statement data
    """
    return route_to_vendor("get_cashflow", ticker, freq, curr_date)
|
||||
|
||||
|
||||
# Thin vendor-dispatch wrapper; docstring doubles as the LLM-facing tool
# description (langchain @tool convention), so it is kept verbatim.
@tool
def get_income_statement(
    ticker: Annotated[str, "ticker symbol"],
    freq: Annotated[str, "reporting frequency: annual/quarterly"] = "quarterly",
    # NOTE(review): default None conflicts with the plain str annotation --
    # consider Annotated[str | None, ...]; verify impact on tool-schema generation.
    curr_date: Annotated[str, "current date you are trading at, yyyy-mm-dd"] = None,
) -> str:
    """
    Retrieve income statement data for a given ticker symbol.
    Uses the configured fundamental_data vendor.
    Args:
        ticker (str): Ticker symbol of the company
        freq (str): Reporting frequency: annual/quarterly (default quarterly)
        curr_date (str): Current date you are trading at, yyyy-mm-dd
    Returns:
        str: A formatted report containing income statement data
    """
    return route_to_vendor("get_income_statement", ticker, freq, curr_date)
|
||||
File diff suppressed because it is too large
Load Diff
|
|
@ -1,144 +1,144 @@
|
|||
"""Financial situation memory using BM25 for lexical similarity matching.
|
||||
|
||||
Uses BM25 (Best Matching 25) algorithm for retrieval - no API calls,
|
||||
no token limits, works offline with any LLM provider.
|
||||
"""
|
||||
|
||||
from rank_bm25 import BM25Okapi
|
||||
from typing import List, Tuple
|
||||
import re
|
||||
|
||||
|
||||
class FinancialSituationMemory:
    """Memory system for storing and retrieving financial situations using BM25."""

    def __init__(self, name: str, config: dict = None):
        """Initialize the memory system.

        Args:
            name: Name identifier for this memory instance
            config: Configuration dict (kept for API compatibility, not used for BM25)
        """
        self.name = name
        self.documents: List[str] = []
        self.recommendations: List[str] = []
        self.bm25 = None

    def _tokenize(self, text: str) -> List[str]:
        """Tokenize text for BM25 indexing.

        Simple whitespace + punctuation tokenization with lowercasing.
        """
        # \b\w+\b keeps alphanumeric runs only; punctuation is discarded.
        return re.findall(r'\b\w+\b', text.lower())

    def _rebuild_index(self):
        """Rebuild the BM25 index after adding documents."""
        if self.documents:
            tokenized_docs = [self._tokenize(doc) for doc in self.documents]
            self.bm25 = BM25Okapi(tokenized_docs)
        else:
            self.bm25 = None

    def add_situations(self, situations_and_advice: List[Tuple[str, str]]):
        """Add financial situations and their corresponding advice.

        Args:
            situations_and_advice: List of tuples (situation, recommendation)
        """
        for situation, recommendation in situations_and_advice:
            self.documents.append(situation)
            self.recommendations.append(recommendation)

        # Rebuild once after the whole batch; BM25Okapi has no incremental add.
        self._rebuild_index()

    def get_memories(self, current_situation: str, n_matches: int = 1) -> List[dict]:
        """Find matching recommendations using BM25 similarity.

        Args:
            current_situation: The current financial situation to match against
            n_matches: Number of top matches to return

        Returns:
            List of dicts with matched_situation, recommendation, and similarity_score
        """
        if not self.documents or self.bm25 is None:
            return []

        query_tokens = self._tokenize(current_situation)

        # BM25 relevance score per stored document.
        scores = self.bm25.get_scores(query_tokens)

        # Top-n document indices by descending score.
        top_indices = sorted(range(len(scores)), key=lambda i: scores[i], reverse=True)[:n_matches]

        # Fix: compute the peak score once. The original called max(scores)
        # twice and then re-tested an always-true `max_score > 0` in the loop.
        peak = max(scores)
        divisor = peak if peak > 0 else 1  # avoid division by zero when nothing overlaps

        results = []
        for idx in top_indices:
            results.append({
                "matched_situation": self.documents[idx],
                "recommendation": self.recommendations[idx],
                # Normalized against the best score (0-1 for non-negative scores).
                "similarity_score": scores[idx] / divisor,
            })

        return results

    def clear(self):
        """Clear all stored memories."""
        self.documents = []
        self.recommendations = []
        self.bm25 = None
|
||||
|
||||
|
||||
if __name__ == "__main__":
    # Smoke test / example usage: seed the memory with canned
    # situation -> advice pairs, then retrieve against a fresh situation.
    matcher = FinancialSituationMemory("test_memory")

    # Example data
    example_data = [
        (
            "High inflation rate with rising interest rates and declining consumer spending",
            "Consider defensive sectors like consumer staples and utilities. Review fixed-income portfolio duration.",
        ),
        (
            "Tech sector showing high volatility with increasing institutional selling pressure",
            "Reduce exposure to high-growth tech stocks. Look for value opportunities in established tech companies with strong cash flows.",
        ),
        (
            "Strong dollar affecting emerging markets with increasing forex volatility",
            "Hedge currency exposure in international positions. Consider reducing allocation to emerging market debt.",
        ),
        (
            "Market showing signs of sector rotation with rising yields",
            "Rebalance portfolio to maintain target allocations. Consider increasing exposure to sectors benefiting from higher rates.",
        ),
    ]

    # Add the example situations and recommendations
    matcher.add_situations(example_data)

    # Example query
    current_situation = """
    Market showing increased volatility in tech sector, with institutional investors
    reducing positions and rising interest rates affecting growth stock valuations
    """

    try:
        recommendations = matcher.get_memories(current_situation, n_matches=2)

        for i, rec in enumerate(recommendations, 1):
            print(f"\nMatch {i}:")
            print(f"Similarity Score: {rec['similarity_score']:.2f}")
            print(f"Matched Situation: {rec['matched_situation']}")
            print(f"Recommendation: {rec['recommendation']}")

    except Exception as e:
        # NOTE(review): broad Exception catch is acceptable for a demo
        # entry point; too broad for library code.
        print(f"Error during recommendation: {str(e)}")
|
||||
"""Financial situation memory using BM25 for lexical similarity matching.
|
||||
|
||||
Uses BM25 (Best Matching 25) algorithm for retrieval - no API calls,
|
||||
no token limits, works offline with any LLM provider.
|
||||
"""
|
||||
|
||||
from rank_bm25 import BM25Okapi
|
||||
from typing import List, Tuple
|
||||
import re
|
||||
|
||||
|
||||
class FinancialSituationMemory:
    """Memory system for storing and retrieving financial situations using BM25."""

    def __init__(self, name: str, config: dict = None):
        """Initialize the memory system.

        Args:
            name: Name identifier for this memory instance
            config: Configuration dict (kept for API compatibility, not used for BM25)
        """
        self.name = name
        self.documents: List[str] = []
        self.recommendations: List[str] = []
        self.bm25 = None

    def _tokenize(self, text: str) -> List[str]:
        """Tokenize text for BM25 indexing.

        Simple whitespace + punctuation tokenization with lowercasing.
        """
        # \b\w+\b keeps alphanumeric runs only; punctuation is discarded.
        return re.findall(r'\b\w+\b', text.lower())

    def _rebuild_index(self):
        """Rebuild the BM25 index after adding documents."""
        if self.documents:
            tokenized_docs = [self._tokenize(doc) for doc in self.documents]
            self.bm25 = BM25Okapi(tokenized_docs)
        else:
            self.bm25 = None

    def add_situations(self, situations_and_advice: List[Tuple[str, str]]):
        """Add financial situations and their corresponding advice.

        Args:
            situations_and_advice: List of tuples (situation, recommendation)
        """
        for situation, recommendation in situations_and_advice:
            self.documents.append(situation)
            self.recommendations.append(recommendation)

        # Rebuild once after the whole batch; BM25Okapi has no incremental add.
        self._rebuild_index()

    def get_memories(self, current_situation: str, n_matches: int = 1) -> List[dict]:
        """Find matching recommendations using BM25 similarity.

        Args:
            current_situation: The current financial situation to match against
            n_matches: Number of top matches to return

        Returns:
            List of dicts with matched_situation, recommendation, and similarity_score
        """
        if not self.documents or self.bm25 is None:
            return []

        query_tokens = self._tokenize(current_situation)

        # BM25 relevance score per stored document.
        scores = self.bm25.get_scores(query_tokens)

        # Top-n document indices by descending score.
        top_indices = sorted(range(len(scores)), key=lambda i: scores[i], reverse=True)[:n_matches]

        # Fix: compute the peak score once. The original called max(scores)
        # twice and then re-tested an always-true `max_score > 0` in the loop.
        peak = max(scores)
        divisor = peak if peak > 0 else 1  # avoid division by zero when nothing overlaps

        results = []
        for idx in top_indices:
            results.append({
                "matched_situation": self.documents[idx],
                "recommendation": self.recommendations[idx],
                # Normalized against the best score (0-1 for non-negative scores).
                "similarity_score": scores[idx] / divisor,
            })

        return results

    def clear(self):
        """Clear all stored memories."""
        self.documents = []
        self.recommendations = []
        self.bm25 = None
|
||||
|
||||
|
||||
if __name__ == "__main__":
    # Smoke test / example usage: seed the memory with canned
    # situation -> advice pairs, then retrieve against a fresh situation.
    matcher = FinancialSituationMemory("test_memory")

    # Example data
    example_data = [
        (
            "High inflation rate with rising interest rates and declining consumer spending",
            "Consider defensive sectors like consumer staples and utilities. Review fixed-income portfolio duration.",
        ),
        (
            "Tech sector showing high volatility with increasing institutional selling pressure",
            "Reduce exposure to high-growth tech stocks. Look for value opportunities in established tech companies with strong cash flows.",
        ),
        (
            "Strong dollar affecting emerging markets with increasing forex volatility",
            "Hedge currency exposure in international positions. Consider reducing allocation to emerging market debt.",
        ),
        (
            "Market showing signs of sector rotation with rising yields",
            "Rebalance portfolio to maintain target allocations. Consider increasing exposure to sectors benefiting from higher rates.",
        ),
    ]

    # Add the example situations and recommendations
    matcher.add_situations(example_data)

    # Example query
    current_situation = """
    Market showing increased volatility in tech sector, with institutional investors
    reducing positions and rising interest rates affecting growth stock valuations
    """

    try:
        recommendations = matcher.get_memories(current_situation, n_matches=2)

        for i, rec in enumerate(recommendations, 1):
            print(f"\nMatch {i}:")
            print(f"Similarity Score: {rec['similarity_score']:.2f}")
            print(f"Matched Situation: {rec['matched_situation']}")
            print(f"Recommendation: {rec['recommendation']}")

    except Exception as e:
        # NOTE(review): broad Exception catch is acceptable for a demo
        # entry point; too broad for library code.
        print(f"Error during recommendation: {str(e)}")
|
||||
|
|
|
|||
|
|
@ -1,53 +1,53 @@
|
|||
from langchain_core.tools import tool
|
||||
from typing import Annotated
|
||||
from tradingagents.dataflows.interface import route_to_vendor
|
||||
|
||||
# Thin vendor-dispatch wrapper; docstring doubles as the LLM-facing tool
# description (langchain @tool convention), so it is kept verbatim.
@tool
def get_news(
    ticker: Annotated[str, "Ticker symbol"],
    start_date: Annotated[str, "Start date in yyyy-mm-dd format"],
    end_date: Annotated[str, "End date in yyyy-mm-dd format"],
) -> str:
    """
    Retrieve news data for a given ticker symbol.
    Uses the configured news_data vendor.
    Args:
        ticker (str): Ticker symbol
        start_date (str): Start date in yyyy-mm-dd format
        end_date (str): End date in yyyy-mm-dd format
    Returns:
        str: A formatted string containing news data
    """
    return route_to_vendor("get_news", ticker, start_date, end_date)
|
||||
|
||||
# Thin vendor-dispatch wrapper; docstring doubles as the LLM-facing tool
# description (langchain @tool convention), so it is kept verbatim.
@tool
def get_global_news(
    curr_date: Annotated[str, "Current date in yyyy-mm-dd format"],
    look_back_days: Annotated[int, "Number of days to look back"] = 7,
    limit: Annotated[int, "Maximum number of articles to return"] = 5,
) -> str:
    """
    Retrieve global news data.
    Uses the configured news_data vendor.
    Args:
        curr_date (str): Current date in yyyy-mm-dd format
        look_back_days (int): Number of days to look back (default 7)
        limit (int): Maximum number of articles to return (default 5)
    Returns:
        str: A formatted string containing global news data
    """
    return route_to_vendor("get_global_news", curr_date, look_back_days, limit)
|
||||
|
||||
# Thin vendor-dispatch wrapper; docstring doubles as the LLM-facing tool
# description (langchain @tool convention), so it is kept verbatim.
@tool
def get_insider_transactions(
    ticker: Annotated[str, "ticker symbol"],
) -> str:
    """
    Retrieve insider transaction information about a company.
    Uses the configured news_data vendor.
    Args:
        ticker (str): Ticker symbol of the company
    Returns:
        str: A report of insider transaction data
    """
    return route_to_vendor("get_insider_transactions", ticker)
|
||||
from langchain_core.tools import tool
|
||||
from typing import Annotated
|
||||
from tradingagents.dataflows.interface import route_to_vendor
|
||||
|
||||
@tool
|
||||
def get_news(
|
||||
ticker: Annotated[str, "Ticker symbol"],
|
||||
start_date: Annotated[str, "Start date in yyyy-mm-dd format"],
|
||||
end_date: Annotated[str, "End date in yyyy-mm-dd format"],
|
||||
) -> str:
|
||||
"""
|
||||
Retrieve news data for a given ticker symbol.
|
||||
Uses the configured news_data vendor.
|
||||
Args:
|
||||
ticker (str): Ticker symbol
|
||||
start_date (str): Start date in yyyy-mm-dd format
|
||||
end_date (str): End date in yyyy-mm-dd format
|
||||
Returns:
|
||||
str: A formatted string containing news data
|
||||
"""
|
||||
return route_to_vendor("get_news", ticker, start_date, end_date)
|
||||
|
||||
@tool
|
||||
def get_global_news(
|
||||
curr_date: Annotated[str, "Current date in yyyy-mm-dd format"],
|
||||
look_back_days: Annotated[int, "Number of days to look back"] = 7,
|
||||
limit: Annotated[int, "Maximum number of articles to return"] = 5,
|
||||
) -> str:
|
||||
"""
|
||||
Retrieve global news data.
|
||||
Uses the configured news_data vendor.
|
||||
Args:
|
||||
curr_date (str): Current date in yyyy-mm-dd format
|
||||
look_back_days (int): Number of days to look back (default 7)
|
||||
limit (int): Maximum number of articles to return (default 5)
|
||||
Returns:
|
||||
str: A formatted string containing global news data
|
||||
"""
|
||||
return route_to_vendor("get_global_news", curr_date, look_back_days, limit)
|
||||
|
||||
@tool
|
||||
def get_insider_transactions(
|
||||
ticker: Annotated[str, "ticker symbol"],
|
||||
) -> str:
|
||||
"""
|
||||
Retrieve insider transaction information about a company.
|
||||
Uses the configured news_data vendor.
|
||||
Args:
|
||||
ticker (str): Ticker symbol of the company
|
||||
Returns:
|
||||
str: A report of insider transaction data
|
||||
"""
|
||||
return route_to_vendor("get_insider_transactions", ticker)
|
||||
|
|
|
|||
|
|
@ -1,26 +1,26 @@
|
|||
from langchain_core.tools import tool
|
||||
from typing import Annotated
|
||||
from tradingagents.dataflows.interface import route_to_vendor
|
||||
|
||||
@tool
|
||||
def get_indicators(
|
||||
symbol: Annotated[str, "ticker symbol of the company"],
|
||||
indicator: Annotated[str, "technical indicator to get the analysis and report of"],
|
||||
curr_date: Annotated[str, "The current trading date you are trading on, YYYY-mm-dd"],
|
||||
look_back_days: Annotated[int, "how many days to look back"] = 30,
|
||||
) -> str:
|
||||
"""
|
||||
Retrieve technical indicators for a given ticker symbol.
|
||||
Uses the configured technical_indicators vendor.
|
||||
Args:
|
||||
symbol (str): Ticker symbol of the company, e.g. AAPL, TSM
|
||||
indicator (str): Technical indicator. Supported: close_50_sma, close_200_sma, close_10_ema, macd, macds, macdh, rsi, boll, boll_ub, boll_lb, atr, vwma, mfi
|
||||
curr_date (str): The current trading date you are trading on, YYYY-mm-dd
|
||||
look_back_days (int): How many days to look back, default is 30
|
||||
Returns:
|
||||
str: A formatted dataframe containing the technical indicators for the specified ticker symbol and indicator.
|
||||
"""
|
||||
try:
|
||||
return route_to_vendor("get_indicators", symbol, indicator, curr_date, look_back_days)
|
||||
except ValueError as e:
|
||||
from langchain_core.tools import tool
|
||||
from typing import Annotated
|
||||
from tradingagents.dataflows.interface import route_to_vendor
|
||||
|
||||
@tool
def get_indicators(
    symbol: Annotated[str, "ticker symbol of the company"],
    indicator: Annotated[str, "technical indicator to get the analysis and report of"],
    curr_date: Annotated[str, "The current trading date you are trading on, YYYY-mm-dd"],
    look_back_days: Annotated[int, "how many days to look back"] = 30,
) -> str:
    """
    Retrieve technical indicators for a given ticker symbol.
    Uses the configured technical_indicators vendor.
    Args:
        symbol (str): Ticker symbol of the company, e.g. AAPL, TSM
        indicator (str): Technical indicator. Supported: close_50_sma, close_200_sma, close_10_ema, macd, macds, macdh, rsi, boll, boll_ub, boll_lb, atr, vwma, mfi
        curr_date (str): The current trading date you are trading on, YYYY-mm-dd
        look_back_days (int): How many days to look back, default is 30
    Returns:
        str: A formatted dataframe containing the technical indicators for the specified ticker symbol and indicator.
    """
    # NOTE: this docstring doubles as the tool description surfaced to the
    # LLM by the @tool decorator — keep it accurate when editing.
    try:
        return route_to_vendor("get_indicators", symbol, indicator, curr_date, look_back_days)
    except ValueError as e:
        # Vendor-side validation errors (e.g. unsupported indicator name) are
        # returned as plain text so the agent sees the message instead of the
        # tool call crashing.
        return str(e)
|
||||
|
|
@ -1,321 +1,321 @@
|
|||
"""Alpaca Market Data API client for the equity ranking engine.
|
||||
|
||||
Provides price bars, snapshots, and news. Fundamentals still come from yfinance.
|
||||
Free tier: 10,000 requests/min, up to 7 years of historical data.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
from datetime import datetime, timedelta
|
||||
from typing import Any, Dict, List, Optional
|
||||
|
||||
import pandas as pd
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Client setup (lazy init)
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
_stock_client = None
|
||||
_news_client = None
|
||||
|
||||
|
||||
def _get_stock_client():
    """Return the process-wide StockHistoricalDataClient, creating it on first use.

    Raises:
        RuntimeError: if the Alpaca credentials are not present in the environment.
    """
    global _stock_client
    if _stock_client is not None:
        return _stock_client

    # Imported lazily so the module can load even without alpaca-py installed.
    from alpaca.data.historical import StockHistoricalDataClient

    api_key = os.environ.get("ALPACA_API_KEY", "")
    api_secret = os.environ.get("ALPACA_API_SECRET", "")
    if not (api_key and api_secret):
        raise RuntimeError(
            "ALPACA_API_KEY and ALPACA_API_SECRET must be set"
        )
    _stock_client = StockHistoricalDataClient(api_key, api_secret)
    return _stock_client
|
||||
|
||||
|
||||
def _get_news_client():
    """Return the process-wide NewsClient, creating it on first use.

    Raises:
        RuntimeError: if the Alpaca credentials are not configured. This check
            mirrors _get_stock_client so both clients fail fast with the same
            message instead of NewsClient raising an opaque auth error on the
            first request.
    """
    global _news_client
    if _news_client is None:
        key = os.environ.get("ALPACA_API_KEY", "")
        secret = os.environ.get("ALPACA_API_SECRET", "")
        # Validate before the lazy import so missing credentials are reported
        # even when alpaca-py itself is installed but unconfigured.
        if not key or not secret:
            raise RuntimeError(
                "ALPACA_API_KEY and ALPACA_API_SECRET must be set"
            )
        from alpaca.data.historical.news import NewsClient

        _news_client = NewsClient(key, secret)
    return _news_client
|
||||
|
||||
|
||||
def alpaca_available() -> bool:
    """Report whether both Alpaca API credentials are present in the environment."""
    required = ("ALPACA_API_KEY", "ALPACA_API_SECRET")
    # Both variables must exist and be non-empty for the integration to work.
    return all(os.environ.get(name) for name in required)
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Price / Bar data
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
def get_bars(
    symbol: str,
    start_date: str,
    end_date: str,
    timeframe: str = "1Day",
) -> pd.DataFrame:
    """Fetch historical bars from Alpaca.

    Args:
        symbol: Ticker symbol (e.g., "AAPL")
        start_date: Start date in YYYY-MM-DD format
        end_date: End date in YYYY-MM-DD format
        timeframe: "1Min", "5Min", "15Min", "1Hour", "1Day", "1Week", "1Month"

    Returns:
        DataFrame with OHLCV columns.

    Raises:
        RuntimeError: via _get_stock_client when credentials are missing.
        ValueError: when start_date/end_date are not YYYY-MM-DD.
    """
    # Imported lazily so the module can load even without alpaca-py installed.
    from alpaca.data.requests import StockBarsRequest
    from alpaca.data.timeframe import TimeFrame, TimeFrameUnit

    # Map the public string timeframe names onto alpaca-py TimeFrame objects.
    tf_map = {
        "1Min": TimeFrame(1, TimeFrameUnit.Minute),
        "5Min": TimeFrame(5, TimeFrameUnit.Minute),
        "15Min": TimeFrame(15, TimeFrameUnit.Minute),
        "1Hour": TimeFrame(1, TimeFrameUnit.Hour),
        "1Day": TimeFrame(1, TimeFrameUnit.Day),
        "1Week": TimeFrame(1, TimeFrameUnit.Week),
        "1Month": TimeFrame(1, TimeFrameUnit.Month),
    }
    # Unknown timeframe strings silently fall back to daily bars.
    tf = tf_map.get(timeframe, TimeFrame(1, TimeFrameUnit.Day))

    client = _get_stock_client()
    request = StockBarsRequest(
        symbol_or_symbols=symbol.upper(),
        timeframe=tf,
        start=datetime.strptime(start_date, "%Y-%m-%d"),
        end=datetime.strptime(end_date, "%Y-%m-%d"),
        feed="iex",  # free-tier feed per module docstring; SIP needs a paid plan -- TODO confirm
    )
    bars = client.get_stock_bars(request)
    df = bars.df
    # Multi-symbol responses carry a (symbol, timestamp) MultiIndex; drop the
    # symbol level since exactly one symbol was requested.
    if isinstance(df.index, pd.MultiIndex):
        df = df.droplevel("symbol")
    return df
|
||||
|
||||
|
||||
def get_bars_csv(symbol: str, start_date: str, end_date: str) -> str:
    """Fetch historical bars and return as CSV string (drop-in for get_YFin_data_online)."""
    try:
        frame = get_bars(symbol, start_date, end_date)
        if frame.empty:
            return f"No data found for '{symbol}' between {start_date} and {end_date}"

        # Align column names with what the yfinance-based flow produces.
        column_names = {
            "open": "Open",
            "high": "High",
            "low": "Low",
            "close": "Close",
            "volume": "Volume",
            "trade_count": "Trade Count",
            "vwap": "VWAP",
        }
        frame = frame.rename(columns=column_names)
        for price_col in ("Open", "High", "Low", "Close"):
            if price_col in frame.columns:
                frame[price_col] = frame[price_col].round(2)

        # Strip timezone info so the index prints like yfinance output.
        if frame.index.tz is not None:
            frame.index = frame.index.tz_localize(None)

        header = (
            f"# Stock data for {symbol.upper()} from {start_date} to {end_date}\n"
            f"# Source: Alpaca Markets (IEX feed)\n"
            f"# Total records: {len(frame)}\n\n"
        )
        return header + frame.to_csv()
    except Exception as e:
        logger.warning("Alpaca bars failed for %s: %s", symbol, e)
        return f"Error fetching Alpaca data for {symbol}: {e}"
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Snapshots (latest quote/trade)
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
def get_snapshot(symbol: str) -> Dict[str, Any]:
    """Get the latest snapshot (quote + trade + bar) for a symbol.

    Returns:
        A flat dict with latest-trade fields always present (None when no
        trade is available) plus quote / daily-bar / previous-bar fields
        added only when the corresponding snapshot section exists. Empty
        dict when Alpaca returns no snapshot for the symbol.
    """
    # Imported lazily so the module can load even without alpaca-py installed.
    from alpaca.data.requests import StockSnapshotRequest

    client = _get_stock_client()
    request = StockSnapshotRequest(symbol_or_symbols=symbol.upper(), feed="iex")
    snapshots = client.get_stock_snapshot(request)
    snap = snapshots.get(symbol.upper())
    if not snap:
        return {}

    # Latest-trade keys are always emitted so callers get a stable shape.
    result = {
        "ticker": symbol.upper(),
        "latest_trade_price": snap.latest_trade.price if snap.latest_trade else None,
        "latest_trade_size": snap.latest_trade.size if snap.latest_trade else None,
        "latest_trade_time": str(snap.latest_trade.timestamp) if snap.latest_trade else None,
    }
    # Quote fields: only when a latest quote exists (e.g. market hours).
    if snap.latest_quote:
        result["bid"] = snap.latest_quote.bid_price
        result["ask"] = snap.latest_quote.ask_price
        result["bid_size"] = snap.latest_quote.bid_size
        result["ask_size"] = snap.latest_quote.ask_size
    # Current session OHLCV.
    if snap.daily_bar:
        result["daily_open"] = snap.daily_bar.open
        result["daily_high"] = snap.daily_bar.high
        result["daily_low"] = snap.daily_bar.low
        result["daily_close"] = snap.daily_bar.close
        result["daily_volume"] = snap.daily_bar.volume
        result["daily_vwap"] = snap.daily_bar.vwap
    # Prior session close/volume, useful for day-over-day change.
    if snap.previous_daily_bar:
        result["prev_close"] = snap.previous_daily_bar.close
        result["prev_volume"] = snap.previous_daily_bar.volume

    return result
|
||||
|
||||
|
||||
def get_multi_snapshots(symbols: List[str]) -> Dict[str, Dict[str, Any]]:
    """Get snapshots for multiple symbols at once."""
    # Imported lazily so the module can load even without alpaca-py installed.
    from alpaca.data.requests import StockSnapshotRequest

    client = _get_stock_client()
    snapshots = client.get_stock_snapshot(
        StockSnapshotRequest(
            symbol_or_symbols=[s.upper() for s in symbols],
            feed="iex",
        )
    )

    out: Dict[str, Dict[str, Any]] = {}
    for ticker, snap in snapshots.items():
        row: Dict[str, Any] = {"ticker": ticker}
        trade = snap.latest_trade
        if trade:
            row["price"] = trade.price
        bar = snap.daily_bar
        if bar:
            row["daily_open"] = bar.open
            row["daily_high"] = bar.high
            row["daily_low"] = bar.low
            row["daily_close"] = bar.close
            row["daily_volume"] = bar.volume
            row["daily_vwap"] = bar.vwap
        prev_bar = snap.previous_daily_bar
        if prev_bar:
            row["prev_close"] = prev_bar.close
        out[ticker] = row
    return out
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Computed indicators from bars
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
def get_moving_averages(symbol: str) -> Dict[str, Any]:
    """Compute 50-day and 200-day moving averages from Alpaca bars.

    Returns an empty dict when data is unavailable or fewer than 50 bars exist.
    """
    window_end = datetime.now()
    window_start = window_end - timedelta(days=300)  # ~200 trading days + buffer

    try:
        bars = get_bars(
            symbol,
            window_start.strftime("%Y-%m-%d"),
            window_end.strftime("%Y-%m-%d"),
        )
        if bars.empty or len(bars) < 50:
            return {}

        closes = bars["close"] if "close" in bars.columns else bars["Close"]
        stats: Dict[str, Any] = {
            "current_price": float(closes.iloc[-1]),
            "fifty_day_avg": float(closes.tail(50).mean()),
        }
        if len(closes) >= 200:
            stats["two_hundred_day_avg"] = float(closes.tail(200).mean())

        # 52-week window: roughly 252 trading sessions.
        yearly = closes if len(closes) < 252 else closes.tail(252)
        stats["fifty_two_week_high"] = float(yearly.max())
        stats["fifty_two_week_low"] = float(yearly.min())

        # Position of the current price inside the 52-week range, in percent.
        span = stats["fifty_two_week_high"] - stats["fifty_two_week_low"]
        if span > 0:
            position = (stats["current_price"] - stats["fifty_two_week_low"]) / span
            stats["vs_52w_range_pct"] = round(position * 100, 1)

        return stats
    except Exception as e:
        logger.warning("Alpaca moving averages failed for %s: %s", symbol, e)
        return {}
|
||||
|
||||
|
||||
def get_sector_etf_performance(etf_symbols: List[str]) -> Dict[str, Dict[str, float]]:
    """Compute 1M and 3M returns for a list of sector ETFs.

    Symbols with no data or too few bars are skipped; per-symbol failures are
    logged and do not abort the batch.
    """
    today = datetime.now()
    lookback_start = today - timedelta(days=100)

    performance: Dict[str, Dict[str, float]] = {}
    for etf in etf_symbols:
        try:
            bars = get_bars(
                etf,
                lookback_start.strftime("%Y-%m-%d"),
                today.strftime("%Y-%m-%d"),
            )
            if bars.empty or len(bars) < 5:
                continue
            closes = bars["close"] if "close" in bars.columns else bars["Close"]
            latest = float(closes.iloc[-1])

            def _return_pct(sessions_back: int):
                # Percent change vs. the close `sessions_back` sessions ago,
                # or None when the history is too short.
                if len(closes) < sessions_back:
                    return None
                past = float(closes.iloc[-sessions_back])
                return round((latest - past) / past * 100, 2)

            performance[etf] = {
                "return_1m": _return_pct(22),  # ~22 trading days per month
                "return_3m": _return_pct(63),  # ~63 trading days per quarter
                "price": latest,
            }
        except Exception as e:
            logger.warning("Alpaca ETF perf failed for %s: %s", etf, e)

    return performance
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# News
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
def get_news(
    symbols: Optional[List[str]] = None,
    limit: int = 10,
    start_date: Optional[str] = None,
) -> List[Dict[str, Any]]:
    """Fetch news articles from Alpaca News API.

    Returns a list of plain dicts; any failure (missing SDK, bad credentials,
    network error) is logged and yields an empty list.
    """
    try:
        # Imported lazily so the module can load even without alpaca-py installed.
        from alpaca.data.requests import NewsRequest

        client = _get_news_client()
        request_args: Dict[str, Any] = {"limit": limit}
        if symbols:
            request_args["symbols"] = [sym.upper() for sym in symbols]
        if start_date:
            request_args["start"] = datetime.strptime(start_date, "%Y-%m-%d")

        response = client.get_news(NewsRequest(**request_args))

        articles: List[Dict[str, Any]] = []
        for item in response.news:
            articles.append(
                {
                    "title": item.headline,
                    "summary": item.summary or "",
                    "url": item.url,
                    "source": item.source,
                    "created_at": str(item.created_at),
                    "symbols": item.symbols or [],
                }
            )
        return articles
    except Exception as e:
        logger.warning("Alpaca news failed: %s", e)
        return []
|
||||
"""Alpaca Market Data API client for the equity ranking engine.
|
||||
|
||||
Provides price bars, snapshots, and news. Fundamentals still come from yfinance.
|
||||
Free tier: 10,000 requests/min, up to 7 years of historical data.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
from datetime import datetime, timedelta
|
||||
from typing import Any, Dict, List, Optional
|
||||
|
||||
import pandas as pd
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Client setup (lazy init)
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
_stock_client = None
|
||||
_news_client = None
|
||||
|
||||
|
||||
def _get_stock_client():
|
||||
global _stock_client
|
||||
if _stock_client is None:
|
||||
from alpaca.data.historical import StockHistoricalDataClient
|
||||
|
||||
key = os.environ.get("ALPACA_API_KEY", "")
|
||||
secret = os.environ.get("ALPACA_API_SECRET", "")
|
||||
if not key or not secret:
|
||||
raise RuntimeError(
|
||||
"ALPACA_API_KEY and ALPACA_API_SECRET must be set"
|
||||
)
|
||||
_stock_client = StockHistoricalDataClient(key, secret)
|
||||
return _stock_client
|
||||
|
||||
|
||||
def _get_news_client():
|
||||
global _news_client
|
||||
if _news_client is None:
|
||||
from alpaca.data.historical.news import NewsClient
|
||||
|
||||
key = os.environ.get("ALPACA_API_KEY", "")
|
||||
secret = os.environ.get("ALPACA_API_SECRET", "")
|
||||
_news_client = NewsClient(key, secret)
|
||||
return _news_client
|
||||
|
||||
|
||||
def alpaca_available() -> bool:
|
||||
"""Check if Alpaca credentials are configured."""
|
||||
return bool(
|
||||
os.environ.get("ALPACA_API_KEY")
|
||||
and os.environ.get("ALPACA_API_SECRET")
|
||||
)
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Price / Bar data
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
def get_bars(
|
||||
symbol: str,
|
||||
start_date: str,
|
||||
end_date: str,
|
||||
timeframe: str = "1Day",
|
||||
) -> pd.DataFrame:
|
||||
"""Fetch historical bars from Alpaca.
|
||||
|
||||
Args:
|
||||
symbol: Ticker symbol (e.g., "AAPL")
|
||||
start_date: Start date in YYYY-MM-DD format
|
||||
end_date: End date in YYYY-MM-DD format
|
||||
timeframe: "1Min", "5Min", "15Min", "1Hour", "1Day", "1Week", "1Month"
|
||||
|
||||
Returns:
|
||||
DataFrame with OHLCV columns.
|
||||
"""
|
||||
from alpaca.data.requests import StockBarsRequest
|
||||
from alpaca.data.timeframe import TimeFrame, TimeFrameUnit
|
||||
|
||||
tf_map = {
|
||||
"1Min": TimeFrame(1, TimeFrameUnit.Minute),
|
||||
"5Min": TimeFrame(5, TimeFrameUnit.Minute),
|
||||
"15Min": TimeFrame(15, TimeFrameUnit.Minute),
|
||||
"1Hour": TimeFrame(1, TimeFrameUnit.Hour),
|
||||
"1Day": TimeFrame(1, TimeFrameUnit.Day),
|
||||
"1Week": TimeFrame(1, TimeFrameUnit.Week),
|
||||
"1Month": TimeFrame(1, TimeFrameUnit.Month),
|
||||
}
|
||||
tf = tf_map.get(timeframe, TimeFrame(1, TimeFrameUnit.Day))
|
||||
|
||||
client = _get_stock_client()
|
||||
request = StockBarsRequest(
|
||||
symbol_or_symbols=symbol.upper(),
|
||||
timeframe=tf,
|
||||
start=datetime.strptime(start_date, "%Y-%m-%d"),
|
||||
end=datetime.strptime(end_date, "%Y-%m-%d"),
|
||||
feed="iex",
|
||||
)
|
||||
bars = client.get_stock_bars(request)
|
||||
df = bars.df
|
||||
if isinstance(df.index, pd.MultiIndex):
|
||||
df = df.droplevel("symbol")
|
||||
return df
|
||||
|
||||
|
||||
def get_bars_csv(symbol: str, start_date: str, end_date: str) -> str:
|
||||
"""Fetch historical bars and return as CSV string (drop-in for get_YFin_data_online)."""
|
||||
try:
|
||||
df = get_bars(symbol, start_date, end_date)
|
||||
if df.empty:
|
||||
return f"No data found for '{symbol}' between {start_date} and {end_date}"
|
||||
|
||||
# Rename columns to match yfinance output format
|
||||
df = df.rename(columns={
|
||||
"open": "Open", "high": "High", "low": "Low",
|
||||
"close": "Close", "volume": "Volume",
|
||||
"trade_count": "Trade Count", "vwap": "VWAP",
|
||||
})
|
||||
for col in ["Open", "High", "Low", "Close"]:
|
||||
if col in df.columns:
|
||||
df[col] = df[col].round(2)
|
||||
|
||||
if df.index.tz is not None:
|
||||
df.index = df.index.tz_localize(None)
|
||||
|
||||
csv = df.to_csv()
|
||||
header = (
|
||||
f"# Stock data for {symbol.upper()} from {start_date} to {end_date}\n"
|
||||
f"# Source: Alpaca Markets (IEX feed)\n"
|
||||
f"# Total records: {len(df)}\n\n"
|
||||
)
|
||||
return header + csv
|
||||
except Exception as e:
|
||||
logger.warning("Alpaca bars failed for %s: %s", symbol, e)
|
||||
return f"Error fetching Alpaca data for {symbol}: {e}"
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Snapshots (latest quote/trade)
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
def get_snapshot(symbol: str) -> Dict[str, Any]:
|
||||
"""Get the latest snapshot (quote + trade + bar) for a symbol."""
|
||||
from alpaca.data.requests import StockSnapshotRequest
|
||||
|
||||
client = _get_stock_client()
|
||||
request = StockSnapshotRequest(symbol_or_symbols=symbol.upper(), feed="iex")
|
||||
snapshots = client.get_stock_snapshot(request)
|
||||
snap = snapshots.get(symbol.upper())
|
||||
if not snap:
|
||||
return {}
|
||||
|
||||
result = {
|
||||
"ticker": symbol.upper(),
|
||||
"latest_trade_price": snap.latest_trade.price if snap.latest_trade else None,
|
||||
"latest_trade_size": snap.latest_trade.size if snap.latest_trade else None,
|
||||
"latest_trade_time": str(snap.latest_trade.timestamp) if snap.latest_trade else None,
|
||||
}
|
||||
if snap.latest_quote:
|
||||
result["bid"] = snap.latest_quote.bid_price
|
||||
result["ask"] = snap.latest_quote.ask_price
|
||||
result["bid_size"] = snap.latest_quote.bid_size
|
||||
result["ask_size"] = snap.latest_quote.ask_size
|
||||
if snap.daily_bar:
|
||||
result["daily_open"] = snap.daily_bar.open
|
||||
result["daily_high"] = snap.daily_bar.high
|
||||
result["daily_low"] = snap.daily_bar.low
|
||||
result["daily_close"] = snap.daily_bar.close
|
||||
result["daily_volume"] = snap.daily_bar.volume
|
||||
result["daily_vwap"] = snap.daily_bar.vwap
|
||||
if snap.previous_daily_bar:
|
||||
result["prev_close"] = snap.previous_daily_bar.close
|
||||
result["prev_volume"] = snap.previous_daily_bar.volume
|
||||
|
||||
return result
|
||||
|
||||
|
||||
def get_multi_snapshots(symbols: List[str]) -> Dict[str, Dict[str, Any]]:
|
||||
"""Get snapshots for multiple symbols at once."""
|
||||
from alpaca.data.requests import StockSnapshotRequest
|
||||
|
||||
client = _get_stock_client()
|
||||
request = StockSnapshotRequest(
|
||||
symbol_or_symbols=[s.upper() for s in symbols],
|
||||
feed="iex",
|
||||
)
|
||||
snapshots = client.get_stock_snapshot(request)
|
||||
result = {}
|
||||
for sym, snap in snapshots.items():
|
||||
entry = {"ticker": sym}
|
||||
if snap.latest_trade:
|
||||
entry["price"] = snap.latest_trade.price
|
||||
if snap.daily_bar:
|
||||
entry["daily_open"] = snap.daily_bar.open
|
||||
entry["daily_high"] = snap.daily_bar.high
|
||||
entry["daily_low"] = snap.daily_bar.low
|
||||
entry["daily_close"] = snap.daily_bar.close
|
||||
entry["daily_volume"] = snap.daily_bar.volume
|
||||
entry["daily_vwap"] = snap.daily_bar.vwap
|
||||
if snap.previous_daily_bar:
|
||||
entry["prev_close"] = snap.previous_daily_bar.close
|
||||
result[sym] = entry
|
||||
return result
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Computed indicators from bars
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
def get_moving_averages(symbol: str) -> Dict[str, Any]:
|
||||
"""Compute 50-day and 200-day moving averages from Alpaca bars."""
|
||||
end = datetime.now()
|
||||
start = end - timedelta(days=300) # ~200 trading days + buffer
|
||||
|
||||
try:
|
||||
df = get_bars(symbol, start.strftime("%Y-%m-%d"), end.strftime("%Y-%m-%d"))
|
||||
if df.empty or len(df) < 50:
|
||||
return {}
|
||||
|
||||
close = df["close"] if "close" in df.columns else df["Close"]
|
||||
result = {
|
||||
"current_price": float(close.iloc[-1]),
|
||||
"fifty_day_avg": float(close.tail(50).mean()),
|
||||
}
|
||||
if len(close) >= 200:
|
||||
result["two_hundred_day_avg"] = float(close.tail(200).mean())
|
||||
|
||||
# 52-week high/low (approx 252 trading days)
|
||||
year_data = close.tail(252) if len(close) >= 252 else close
|
||||
result["fifty_two_week_high"] = float(year_data.max())
|
||||
result["fifty_two_week_low"] = float(year_data.min())
|
||||
|
||||
hi = result["fifty_two_week_high"]
|
||||
lo = result["fifty_two_week_low"]
|
||||
price = result["current_price"]
|
||||
if (hi - lo) > 0:
|
||||
result["vs_52w_range_pct"] = round((price - lo) / (hi - lo) * 100, 1)
|
||||
|
||||
return result
|
||||
except Exception as e:
|
||||
logger.warning("Alpaca moving averages failed for %s: %s", symbol, e)
|
||||
return {}
|
||||
|
||||
|
||||
def get_sector_etf_performance(etf_symbols: List[str]) -> Dict[str, Dict[str, float]]:
|
||||
"""Compute 1M and 3M returns for a list of sector ETFs."""
|
||||
end = datetime.now()
|
||||
start_3m = end - timedelta(days=100)
|
||||
|
||||
result = {}
|
||||
for sym in etf_symbols:
|
||||
try:
|
||||
df = get_bars(sym, start_3m.strftime("%Y-%m-%d"), end.strftime("%Y-%m-%d"))
|
||||
if df.empty or len(df) < 5:
|
||||
continue
|
||||
close = df["close"] if "close" in df.columns else df["Close"]
|
||||
current = float(close.iloc[-1])
|
||||
|
||||
ret_1m = None
|
||||
if len(close) >= 22:
|
||||
price_1m = float(close.iloc[-22])
|
||||
ret_1m = round((current - price_1m) / price_1m * 100, 2)
|
||||
|
||||
ret_3m = None
|
||||
if len(close) >= 63:
|
||||
price_3m = float(close.iloc[-63])
|
||||
ret_3m = round((current - price_3m) / price_3m * 100, 2)
|
||||
|
||||
result[sym] = {
|
||||
"return_1m": ret_1m,
|
||||
"return_3m": ret_3m,
|
||||
"price": current,
|
||||
}
|
||||
except Exception as e:
|
||||
logger.warning("Alpaca ETF perf failed for %s: %s", sym, e)
|
||||
|
||||
return result
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# News
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
def get_news(
|
||||
symbols: Optional[List[str]] = None,
|
||||
limit: int = 10,
|
||||
start_date: Optional[str] = None,
|
||||
) -> List[Dict[str, Any]]:
|
||||
"""Fetch news articles from Alpaca News API."""
|
||||
try:
|
||||
from alpaca.data.requests import NewsRequest
|
||||
|
||||
client = _get_news_client()
|
||||
kwargs: Dict[str, Any] = {"limit": limit}
|
||||
if symbols:
|
||||
kwargs["symbols"] = [s.upper() for s in symbols]
|
||||
if start_date:
|
||||
kwargs["start"] = datetime.strptime(start_date, "%Y-%m-%d")
|
||||
|
||||
request = NewsRequest(**kwargs)
|
||||
news = client.get_news(request)
|
||||
|
||||
return [
|
||||
{
|
||||
"title": n.headline,
|
||||
"summary": n.summary or "",
|
||||
"url": n.url,
|
||||
"source": n.source,
|
||||
"created_at": str(n.created_at),
|
||||
"symbols": n.symbols or [],
|
||||
}
|
||||
for n in news.news
|
||||
]
|
||||
except Exception as e:
|
||||
logger.warning("Alpaca news failed: %s", e)
|
||||
return []
|
||||
|
|
|
|||
|
|
@ -1,5 +1,5 @@
|
|||
# Import functions from specialized modules
|
||||
from .alpha_vantage_stock import get_stock
|
||||
from .alpha_vantage_indicator import get_indicator
|
||||
from .alpha_vantage_fundamentals import get_fundamentals, get_balance_sheet, get_cashflow, get_income_statement
|
||||
# Import functions from specialized modules
|
||||
from .alpha_vantage_stock import get_stock
|
||||
from .alpha_vantage_indicator import get_indicator
|
||||
from .alpha_vantage_fundamentals import get_fundamentals, get_balance_sheet, get_cashflow, get_income_statement
|
||||
from .alpha_vantage_news import get_news, get_global_news, get_insider_transactions
|
||||
|
|
@ -1,122 +1,122 @@
|
|||
import os
|
||||
import requests
|
||||
import pandas as pd
|
||||
import json
|
||||
from datetime import datetime
|
||||
from io import StringIO
|
||||
|
||||
API_BASE_URL = "https://www.alphavantage.co/query"
|
||||
|
||||
def get_api_key() -> str:
    """Retrieve the API key for Alpha Vantage from environment variables.

    Raises:
        ValueError: when the variable is missing or empty.
    """
    key = os.getenv("ALPHA_VANTAGE_API_KEY")
    if key:
        return key
    raise ValueError("ALPHA_VANTAGE_API_KEY environment variable is not set.")
|
||||
|
||||
def format_datetime_for_api(date_input) -> str:
    """Convert various date formats to YYYYMMDDTHHMM format required by Alpha Vantage API.

    Accepts a datetime, an already-formatted "YYYYMMDDTHHMM" string, or the
    human-readable forms "YYYY-MM-DD" and "YYYY-MM-DD HH:MM".

    Raises:
        ValueError: for unsupported string formats or non-str/datetime inputs.
    """
    if isinstance(date_input, datetime):
        return date_input.strftime("%Y%m%dT%H%M")

    if isinstance(date_input, str):
        # Already in API form (e.g. "20240115T0930")? Pass it through untouched.
        if len(date_input) == 13 and 'T' in date_input:
            return date_input
        # Try the two accepted human-readable patterns; a date-only input gets
        # midnight ("0000") as its time component.
        for pattern, out_fmt in (
            ("%Y-%m-%d", "%Y%m%dT0000"),
            ("%Y-%m-%d %H:%M", "%Y%m%dT%H%M"),
        ):
            try:
                return datetime.strptime(date_input, pattern).strftime(out_fmt)
            except ValueError:
                continue
        raise ValueError(f"Unsupported date format: {date_input}")

    raise ValueError(f"Date must be string or datetime object, got {type(date_input)}")
|
||||
|
||||
class AlphaVantageRateLimitError(Exception):
    """Exception raised when Alpha Vantage API rate limit is exceeded."""
    # Raised by _make_api_request when the API's JSON "Information" payload
    # mentions a rate limit or an API-key problem; callers can catch it to
    # back off or fall back to another data vendor.
    pass
|
||||
|
||||
def _make_api_request(function_name: str, params: dict) -> dict | str:
    """Make an Alpha Vantage API request and return the raw response text.

    Args:
        function_name: Value for the Alpha Vantage "function" query parameter.
        params: Additional query parameters; the dict is not mutated.

    Returns:
        The response body as text (CSV for data endpoints; error responses
        come back as JSON text).

    Raises:
        AlphaVantageRateLimitError: When API rate limit is exceeded.
    """
    # Create a copy of params to avoid modifying the original
    api_params = params.copy()
    api_params.update({
        "function": function_name,
        "apikey": get_api_key(),
        "source": "trading_agents",
    })

    # Handle entitlement parameter if present in params or in a module-level
    # `_current_entitlement` global. The global is looked up via globals()
    # because it may never have been defined in this module -- TODO confirm
    # where it is expected to be set.
    current_entitlement = globals().get('_current_entitlement')
    entitlement = api_params.get("entitlement") or current_entitlement

    if entitlement:
        api_params["entitlement"] = entitlement
    elif "entitlement" in api_params:
        # Remove entitlement if it's None or empty
        api_params.pop("entitlement", None)

    # Explicit timeout: requests has no default, so a stalled endpoint would
    # otherwise hang the caller indefinitely.
    response = requests.get(API_BASE_URL, params=api_params, timeout=30)
    response.raise_for_status()

    response_text = response.text

    # Check if response is JSON (error responses are typically JSON)
    try:
        response_json = json.loads(response_text)
        # Rate-limit / key problems arrive as a JSON "Information" message.
        if "Information" in response_json:
            info_message = response_json["Information"]
            if "rate limit" in info_message.lower() or "api key" in info_message.lower():
                raise AlphaVantageRateLimitError(f"Alpha Vantage rate limit exceeded: {info_message}")
    except json.JSONDecodeError:
        # Response is not JSON (likely CSV data), which is normal
        pass

    return response_text
|
||||
|
||||
|
||||
|
||||
def _filter_csv_by_date_range(csv_data: str, start_date: str, end_date: str) -> str:
|
||||
"""
|
||||
Filter CSV data to include only rows within the specified date range.
|
||||
|
||||
Args:
|
||||
csv_data: CSV string from Alpha Vantage API
|
||||
start_date: Start date in yyyy-mm-dd format
|
||||
end_date: End date in yyyy-mm-dd format
|
||||
|
||||
Returns:
|
||||
Filtered CSV string
|
||||
"""
|
||||
if not csv_data or csv_data.strip() == "":
|
||||
return csv_data
|
||||
|
||||
try:
|
||||
# Parse CSV data
|
||||
df = pd.read_csv(StringIO(csv_data))
|
||||
|
||||
# Assume the first column is the date column (timestamp)
|
||||
date_col = df.columns[0]
|
||||
df[date_col] = pd.to_datetime(df[date_col])
|
||||
|
||||
# Filter by date range
|
||||
start_dt = pd.to_datetime(start_date)
|
||||
end_dt = pd.to_datetime(end_date)
|
||||
|
||||
filtered_df = df[(df[date_col] >= start_dt) & (df[date_col] <= end_dt)]
|
||||
|
||||
# Convert back to CSV string
|
||||
return filtered_df.to_csv(index=False)
|
||||
|
||||
except Exception as e:
|
||||
# If filtering fails, return original data with a warning
|
||||
print(f"Warning: Failed to filter CSV data by date range: {e}")
|
||||
return csv_data
|
||||
import os
|
||||
import requests
|
||||
import pandas as pd
|
||||
import json
|
||||
from datetime import datetime
|
||||
from io import StringIO
|
||||
|
||||
API_BASE_URL = "https://www.alphavantage.co/query"
|
||||
|
||||
def get_api_key() -> str:
    """Return the Alpha Vantage API key from the environment.

    Raises:
        ValueError: If ALPHA_VANTAGE_API_KEY is unset or empty.
    """
    key = os.getenv("ALPHA_VANTAGE_API_KEY")
    if key:
        return key
    raise ValueError("ALPHA_VANTAGE_API_KEY environment variable is not set.")
|
||||
|
||||
def format_datetime_for_api(date_input) -> str:
    """Normalize a date into the YYYYMMDDTHHMM form Alpha Vantage expects.

    Accepts a datetime, an already-formatted string, "%Y-%m-%d", or
    "%Y-%m-%d %H:%M".

    Raises:
        ValueError: For unparseable strings or unsupported input types.
    """
    if isinstance(date_input, datetime):
        return date_input.strftime("%Y%m%dT%H%M")

    if not isinstance(date_input, str):
        raise ValueError(f"Date must be string or datetime object, got {type(date_input)}")

    # Already normalized: 8 date digits + 'T' + 4 time digits is 13 chars.
    if len(date_input) == 13 and 'T' in date_input:
        return date_input

    # Try the supported input layouts in order; first match wins.
    for in_fmt, out_fmt in (("%Y-%m-%d", "%Y%m%dT0000"), ("%Y-%m-%d %H:%M", "%Y%m%dT%H%M")):
        try:
            return datetime.strptime(date_input, in_fmt).strftime(out_fmt)
        except ValueError:
            continue
    raise ValueError(f"Unsupported date format: {date_input}")
|
||||
|
||||
class AlphaVantageRateLimitError(Exception):
    """Raised when the Alpha Vantage API reports that its rate limit was hit."""
|
||||
|
||||
def _make_api_request(function_name: str, params: dict) -> str:
    """Call the Alpha Vantage HTTP API and return the raw response body.

    Args:
        function_name: Alpha Vantage "function" name, e.g. "OVERVIEW" or "SMA".
        params: Endpoint-specific query parameters. The caller's dict is not
            modified; the API key and tracking source are added to a copy.

    Returns:
        The raw response text (CSV or JSON, depending on the endpoint).

    Raises:
        AlphaVantageRateLimitError: When the API reports a rate-limit problem.
        requests.HTTPError: When the HTTP status indicates an error.
    """
    # Copy so the caller's dict is not mutated.
    api_params = params.copy()
    api_params.update({
        "function": function_name,
        "apikey": get_api_key(),
        "source": "trading_agents",
    })

    # Entitlement may come from the call itself or from the module-global
    # `_current_entitlement` override set elsewhere.
    entitlement = api_params.get("entitlement") or globals().get('_current_entitlement')
    if entitlement:
        api_params["entitlement"] = entitlement
    else:
        # Drop a None/empty entitlement so it is not sent as a query param.
        api_params.pop("entitlement", None)

    # A timeout prevents a hung connection from blocking the caller forever.
    response = requests.get(API_BASE_URL, params=api_params, timeout=30)
    response.raise_for_status()

    response_text = response.text

    # Error responses come back as JSON even for CSV endpoints; detect the
    # rate-limit message before handing data back to the caller.
    try:
        response_json = json.loads(response_text)
        # json.loads may yield a non-dict (e.g. a list); only probe dicts.
        if isinstance(response_json, dict) and "Information" in response_json:
            info_message = response_json["Information"]
            if "rate limit" in info_message.lower() or "api key" in info_message.lower():
                raise AlphaVantageRateLimitError(f"Alpha Vantage rate limit exceeded: {info_message}")
    except json.JSONDecodeError:
        # Response is not JSON (likely CSV data), which is normal.
        pass

    return response_text
|
||||
|
||||
|
||||
|
||||
def _filter_csv_by_date_range(csv_data: str, start_date: str, end_date: str) -> str:
|
||||
"""
|
||||
Filter CSV data to include only rows within the specified date range.
|
||||
|
||||
Args:
|
||||
csv_data: CSV string from Alpha Vantage API
|
||||
start_date: Start date in yyyy-mm-dd format
|
||||
end_date: End date in yyyy-mm-dd format
|
||||
|
||||
Returns:
|
||||
Filtered CSV string
|
||||
"""
|
||||
if not csv_data or csv_data.strip() == "":
|
||||
return csv_data
|
||||
|
||||
try:
|
||||
# Parse CSV data
|
||||
df = pd.read_csv(StringIO(csv_data))
|
||||
|
||||
# Assume the first column is the date column (timestamp)
|
||||
date_col = df.columns[0]
|
||||
df[date_col] = pd.to_datetime(df[date_col])
|
||||
|
||||
# Filter by date range
|
||||
start_dt = pd.to_datetime(start_date)
|
||||
end_dt = pd.to_datetime(end_date)
|
||||
|
||||
filtered_df = df[(df[date_col] >= start_dt) & (df[date_col] <= end_dt)]
|
||||
|
||||
# Convert back to CSV string
|
||||
return filtered_df.to_csv(index=False)
|
||||
|
||||
except Exception as e:
|
||||
# If filtering fails, return original data with a warning
|
||||
print(f"Warning: Failed to filter CSV data by date range: {e}")
|
||||
return csv_data
|
||||
|
|
|
|||
|
|
@ -1,77 +1,77 @@
|
|||
from .alpha_vantage_common import _make_api_request
|
||||
|
||||
|
||||
def get_fundamentals(ticker: str, curr_date: str = None) -> str:
    """Fetch the Alpha Vantage company OVERVIEW for a ticker.

    Args:
        ticker (str): Ticker symbol of the company.
        curr_date (str): Trading date, yyyy-mm-dd (ignored by Alpha Vantage;
            kept for interface parity with other data vendors).

    Returns:
        str: Company overview data including financial ratios and key metrics.
    """
    return _make_api_request("OVERVIEW", {"symbol": ticker})
|
||||
|
||||
|
||||
def get_balance_sheet(ticker: str, freq: str = "quarterly", curr_date: str = None) -> str:
    """Fetch Alpha Vantage balance sheet data for a ticker.

    Args:
        ticker (str): Ticker symbol of the company.
        freq (str): Reporting frequency, annual/quarterly (ignored by Alpha
            Vantage; kept for interface parity with other data vendors).
        curr_date (str): Trading date, yyyy-mm-dd (ignored by Alpha Vantage).

    Returns:
        str: Balance sheet data with normalized fields.
    """
    return _make_api_request("BALANCE_SHEET", {"symbol": ticker})
|
||||
|
||||
|
||||
def get_cashflow(ticker: str, freq: str = "quarterly", curr_date: str = None) -> str:
    """Fetch Alpha Vantage cash flow statement data for a ticker.

    Args:
        ticker (str): Ticker symbol of the company.
        freq (str): Reporting frequency, annual/quarterly (ignored by Alpha
            Vantage; kept for interface parity with other data vendors).
        curr_date (str): Trading date, yyyy-mm-dd (ignored by Alpha Vantage).

    Returns:
        str: Cash flow statement data with normalized fields.
    """
    return _make_api_request("CASH_FLOW", {"symbol": ticker})
|
||||
|
||||
|
||||
def get_income_statement(ticker: str, freq: str = "quarterly", curr_date: str = None) -> str:
    """Fetch Alpha Vantage income statement data for a ticker.

    Args:
        ticker (str): Ticker symbol of the company.
        freq (str): Reporting frequency, annual/quarterly (ignored by Alpha
            Vantage; kept for interface parity with other data vendors).
        curr_date (str): Trading date, yyyy-mm-dd (ignored by Alpha Vantage).

    Returns:
        str: Income statement data with normalized fields.
    """
    return _make_api_request("INCOME_STATEMENT", {"symbol": ticker})
|
||||
|
||||
from .alpha_vantage_common import _make_api_request
|
||||
|
||||
|
||||
def get_fundamentals(ticker: str, curr_date: str = None) -> str:
    """Fetch the Alpha Vantage company OVERVIEW for a ticker.

    Args:
        ticker (str): Ticker symbol of the company.
        curr_date (str): Trading date, yyyy-mm-dd (ignored by Alpha Vantage;
            kept for interface parity with other data vendors).

    Returns:
        str: Company overview data including financial ratios and key metrics.
    """
    return _make_api_request("OVERVIEW", {"symbol": ticker})
|
||||
|
||||
|
||||
def get_balance_sheet(ticker: str, freq: str = "quarterly", curr_date: str = None) -> str:
    """Fetch Alpha Vantage balance sheet data for a ticker.

    Args:
        ticker (str): Ticker symbol of the company.
        freq (str): Reporting frequency, annual/quarterly (ignored by Alpha
            Vantage; kept for interface parity with other data vendors).
        curr_date (str): Trading date, yyyy-mm-dd (ignored by Alpha Vantage).

    Returns:
        str: Balance sheet data with normalized fields.
    """
    return _make_api_request("BALANCE_SHEET", {"symbol": ticker})
|
||||
|
||||
|
||||
def get_cashflow(ticker: str, freq: str = "quarterly", curr_date: str = None) -> str:
    """Fetch Alpha Vantage cash flow statement data for a ticker.

    Args:
        ticker (str): Ticker symbol of the company.
        freq (str): Reporting frequency, annual/quarterly (ignored by Alpha
            Vantage; kept for interface parity with other data vendors).
        curr_date (str): Trading date, yyyy-mm-dd (ignored by Alpha Vantage).

    Returns:
        str: Cash flow statement data with normalized fields.
    """
    return _make_api_request("CASH_FLOW", {"symbol": ticker})
|
||||
|
||||
|
||||
def get_income_statement(ticker: str, freq: str = "quarterly", curr_date: str = None) -> str:
    """Fetch Alpha Vantage income statement data for a ticker.

    Args:
        ticker (str): Ticker symbol of the company.
        freq (str): Reporting frequency, annual/quarterly (ignored by Alpha
            Vantage; kept for interface parity with other data vendors).
        curr_date (str): Trading date, yyyy-mm-dd (ignored by Alpha Vantage).

    Returns:
        str: Income statement data with normalized fields.
    """
    return _make_api_request("INCOME_STATEMENT", {"symbol": ticker})
|
||||
|
||||
|
|
|
|||
|
|
@ -1,222 +1,222 @@
|
|||
from .alpha_vantage_common import _make_api_request
|
||||
|
||||
def get_indicator(
    symbol: str,
    indicator: str,
    curr_date: str,
    look_back_days: int,
    interval: str = "daily",
    time_period: int = 14,
    series_type: str = "close"
) -> str:
    """
    Returns Alpha Vantage technical indicator values over a time window.

    Args:
        symbol: ticker symbol of the company
        indicator: technical indicator to get the analysis and report of
        curr_date: The current trading date you are trading on, YYYY-mm-dd
        look_back_days: how many days to look back
        interval: Time interval (daily, weekly, monthly)
        time_period: Number of data points for calculation (used by RSI/ATR)
        series_type: The desired price type (close, open, high, low)

    Returns:
        String containing indicator values and description

    Raises:
        ValueError: If the indicator name is not supported.
    """
    # timedelta covers the plain day offset needed here, so the third-party
    # dateutil.relativedelta dependency is unnecessary.
    from datetime import datetime, timedelta

    # indicator name -> (human-readable label, required series_type or None).
    supported_indicators = {
        "close_50_sma": ("50 SMA", "close"),
        "close_200_sma": ("200 SMA", "close"),
        "close_10_ema": ("10 EMA", "close"),
        "macd": ("MACD", "close"),
        "macds": ("MACD Signal", "close"),
        "macdh": ("MACD Histogram", "close"),
        "rsi": ("RSI", "close"),
        "boll": ("Bollinger Middle", "close"),
        "boll_ub": ("Bollinger Upper Band", "close"),
        "boll_lb": ("Bollinger Lower Band", "close"),
        "atr": ("ATR", None),
        "vwma": ("VWMA", "close")
    }

    indicator_descriptions = {
        "close_50_sma": "50 SMA: A medium-term trend indicator. Usage: Identify trend direction and serve as dynamic support/resistance. Tips: It lags price; combine with faster indicators for timely signals.",
        "close_200_sma": "200 SMA: A long-term trend benchmark. Usage: Confirm overall market trend and identify golden/death cross setups. Tips: It reacts slowly; best for strategic trend confirmation rather than frequent trading entries.",
        "close_10_ema": "10 EMA: A responsive short-term average. Usage: Capture quick shifts in momentum and potential entry points. Tips: Prone to noise in choppy markets; use alongside longer averages for filtering false signals.",
        "macd": "MACD: Computes momentum via differences of EMAs. Usage: Look for crossovers and divergence as signals of trend changes. Tips: Confirm with other indicators in low-volatility or sideways markets.",
        "macds": "MACD Signal: An EMA smoothing of the MACD line. Usage: Use crossovers with the MACD line to trigger trades. Tips: Should be part of a broader strategy to avoid false positives.",
        "macdh": "MACD Histogram: Shows the gap between the MACD line and its signal. Usage: Visualize momentum strength and spot divergence early. Tips: Can be volatile; complement with additional filters in fast-moving markets.",
        "rsi": "RSI: Measures momentum to flag overbought/oversold conditions. Usage: Apply 70/30 thresholds and watch for divergence to signal reversals. Tips: In strong trends, RSI may remain extreme; always cross-check with trend analysis.",
        "boll": "Bollinger Middle: A 20 SMA serving as the basis for Bollinger Bands. Usage: Acts as a dynamic benchmark for price movement. Tips: Combine with the upper and lower bands to effectively spot breakouts or reversals.",
        "boll_ub": "Bollinger Upper Band: Typically 2 standard deviations above the middle line. Usage: Signals potential overbought conditions and breakout zones. Tips: Confirm signals with other tools; prices may ride the band in strong trends.",
        "boll_lb": "Bollinger Lower Band: Typically 2 standard deviations below the middle line. Usage: Indicates potential oversold conditions. Tips: Use additional analysis to avoid false reversal signals.",
        "atr": "ATR: Averages true range to measure volatility. Usage: Set stop-loss levels and adjust position sizes based on current market volatility. Tips: It's a reactive measure, so use it as part of a broader risk management strategy.",
        "vwma": "VWMA: A moving average weighted by volume. Usage: Confirm trends by integrating price action with volume data. Tips: Watch for skewed results from volume spikes; use in combination with other volume analyses."
    }

    if indicator not in supported_indicators:
        raise ValueError(
            f"Indicator {indicator} is not supported. Please choose from: {list(supported_indicators.keys())}"
        )

    curr_date_dt = datetime.strptime(curr_date, "%Y-%m-%d")
    before = curr_date_dt - timedelta(days=look_back_days)

    # Some indicators mandate a specific series_type; override the argument.
    _, required_series_type = supported_indicators[indicator]
    if required_series_type:
        series_type = required_series_type

    try:
        if indicator == "vwma":
            # Alpha Vantage doesn't have direct VWMA; it would need to be
            # calculated from raw OHLCV data.
            return f"## VWMA (Volume Weighted Moving Average) for {symbol}:\n\nVWMA calculation requires OHLCV data and is not directly available from Alpha Vantage API.\nThis indicator would need to be calculated from the raw stock data using volume-weighted price averaging.\n\n{indicator_descriptions.get('vwma', 'No description available.')}"

        # Dispatch table: indicator -> (Alpha Vantage function, extra params).
        # Replaces a long if/elif chain that repeated the same request shape.
        av_requests = {
            "close_50_sma": ("SMA", {"time_period": "50", "series_type": series_type}),
            "close_200_sma": ("SMA", {"time_period": "200", "series_type": series_type}),
            "close_10_ema": ("EMA", {"time_period": "10", "series_type": series_type}),
            "macd": ("MACD", {"series_type": series_type}),
            "macds": ("MACD", {"series_type": series_type}),
            "macdh": ("MACD", {"series_type": series_type}),
            "rsi": ("RSI", {"time_period": str(time_period), "series_type": series_type}),
            "boll": ("BBANDS", {"time_period": "20", "series_type": series_type}),
            "boll_ub": ("BBANDS", {"time_period": "20", "series_type": series_type}),
            "boll_lb": ("BBANDS", {"time_period": "20", "series_type": series_type}),
            # ATR is computed from high/low/close, so it takes no series_type.
            "atr": ("ATR", {"time_period": str(time_period)}),
        }
        av_function, extra_params = av_requests[indicator]
        request_params = {"symbol": symbol, "interval": interval, "datatype": "csv"}
        request_params.update(extra_params)
        data = _make_api_request(av_function, request_params)

        # Parse CSV data and extract values for the date range.
        lines = data.strip().split('\n')
        if len(lines) < 2:
            return f"Error: No data returned for {indicator}"

        header = [col.strip() for col in lines[0].split(',')]
        try:
            date_col_idx = header.index('time')
        except ValueError:
            return f"Error: 'time' column not found in data for {indicator}. Available columns: {header}"

        # Map internal indicator names to expected CSV column names from Alpha Vantage.
        col_name_map = {
            "macd": "MACD", "macds": "MACD_Signal", "macdh": "MACD_Hist",
            "boll": "Real Middle Band", "boll_ub": "Real Upper Band", "boll_lb": "Real Lower Band",
            "rsi": "RSI", "atr": "ATR", "close_10_ema": "EMA",
            "close_50_sma": "SMA", "close_200_sma": "SMA"
        }
        target_col_name = col_name_map.get(indicator)
        if not target_col_name:
            # Default to the second column if no specific mapping exists.
            value_col_idx = 1
        else:
            try:
                value_col_idx = header.index(target_col_name)
            except ValueError:
                return f"Error: Column '{target_col_name}' not found for indicator '{indicator}'. Available columns: {header}"

        result_data = []
        for line in lines[1:]:
            if not line.strip():
                continue
            values = line.split(',')
            if len(values) > value_col_idx:
                try:
                    date_dt = datetime.strptime(values[date_col_idx].strip(), "%Y-%m-%d")
                    # Keep only rows inside the requested window.
                    if before <= date_dt <= curr_date_dt:
                        result_data.append((date_dt, values[value_col_idx].strip()))
                except (ValueError, IndexError):
                    # Skip malformed rows rather than abort the whole report.
                    continue

        # Sort chronologically and format the output.
        result_data.sort(key=lambda x: x[0])

        ind_string = "".join(
            f"{date_dt.strftime('%Y-%m-%d')}: {value}\n" for date_dt, value in result_data
        )
        if not ind_string:
            ind_string = "No data available for the specified date range.\n"

        return (
            f"## {indicator.upper()} values from {before.strftime('%Y-%m-%d')} to {curr_date}:\n\n"
            + ind_string
            + "\n\n"
            + indicator_descriptions.get(indicator, "No description available.")
        )

    except Exception as e:
        print(f"Error getting Alpha Vantage indicator data for {indicator}: {e}")
        return f"Error retrieving {indicator} data: {str(e)}"
|
||||
from .alpha_vantage_common import _make_api_request
|
||||
|
||||
def get_indicator(
    symbol: str,
    indicator: str,
    curr_date: str,
    look_back_days: int,
    interval: str = "daily",
    time_period: int = 14,
    series_type: str = "close"
) -> str:
    """
    Returns Alpha Vantage technical indicator values over a time window.

    Args:
        symbol: ticker symbol of the company
        indicator: technical indicator to get the analysis and report of
        curr_date: The current trading date you are trading on, YYYY-mm-dd
        look_back_days: how many days to look back
        interval: Time interval (daily, weekly, monthly)
        time_period: Number of data points for calculation (used by RSI/ATR)
        series_type: The desired price type (close, open, high, low)

    Returns:
        String containing indicator values and description

    Raises:
        ValueError: If the indicator name is not supported.
    """
    # timedelta covers the plain day offset needed here, so the third-party
    # dateutil.relativedelta dependency is unnecessary.
    from datetime import datetime, timedelta

    # indicator name -> (human-readable label, required series_type or None).
    supported_indicators = {
        "close_50_sma": ("50 SMA", "close"),
        "close_200_sma": ("200 SMA", "close"),
        "close_10_ema": ("10 EMA", "close"),
        "macd": ("MACD", "close"),
        "macds": ("MACD Signal", "close"),
        "macdh": ("MACD Histogram", "close"),
        "rsi": ("RSI", "close"),
        "boll": ("Bollinger Middle", "close"),
        "boll_ub": ("Bollinger Upper Band", "close"),
        "boll_lb": ("Bollinger Lower Band", "close"),
        "atr": ("ATR", None),
        "vwma": ("VWMA", "close")
    }

    indicator_descriptions = {
        "close_50_sma": "50 SMA: A medium-term trend indicator. Usage: Identify trend direction and serve as dynamic support/resistance. Tips: It lags price; combine with faster indicators for timely signals.",
        "close_200_sma": "200 SMA: A long-term trend benchmark. Usage: Confirm overall market trend and identify golden/death cross setups. Tips: It reacts slowly; best for strategic trend confirmation rather than frequent trading entries.",
        "close_10_ema": "10 EMA: A responsive short-term average. Usage: Capture quick shifts in momentum and potential entry points. Tips: Prone to noise in choppy markets; use alongside longer averages for filtering false signals.",
        "macd": "MACD: Computes momentum via differences of EMAs. Usage: Look for crossovers and divergence as signals of trend changes. Tips: Confirm with other indicators in low-volatility or sideways markets.",
        "macds": "MACD Signal: An EMA smoothing of the MACD line. Usage: Use crossovers with the MACD line to trigger trades. Tips: Should be part of a broader strategy to avoid false positives.",
        "macdh": "MACD Histogram: Shows the gap between the MACD line and its signal. Usage: Visualize momentum strength and spot divergence early. Tips: Can be volatile; complement with additional filters in fast-moving markets.",
        "rsi": "RSI: Measures momentum to flag overbought/oversold conditions. Usage: Apply 70/30 thresholds and watch for divergence to signal reversals. Tips: In strong trends, RSI may remain extreme; always cross-check with trend analysis.",
        "boll": "Bollinger Middle: A 20 SMA serving as the basis for Bollinger Bands. Usage: Acts as a dynamic benchmark for price movement. Tips: Combine with the upper and lower bands to effectively spot breakouts or reversals.",
        "boll_ub": "Bollinger Upper Band: Typically 2 standard deviations above the middle line. Usage: Signals potential overbought conditions and breakout zones. Tips: Confirm signals with other tools; prices may ride the band in strong trends.",
        "boll_lb": "Bollinger Lower Band: Typically 2 standard deviations below the middle line. Usage: Indicates potential oversold conditions. Tips: Use additional analysis to avoid false reversal signals.",
        "atr": "ATR: Averages true range to measure volatility. Usage: Set stop-loss levels and adjust position sizes based on current market volatility. Tips: It's a reactive measure, so use it as part of a broader risk management strategy.",
        "vwma": "VWMA: A moving average weighted by volume. Usage: Confirm trends by integrating price action with volume data. Tips: Watch for skewed results from volume spikes; use in combination with other volume analyses."
    }

    if indicator not in supported_indicators:
        raise ValueError(
            f"Indicator {indicator} is not supported. Please choose from: {list(supported_indicators.keys())}"
        )

    curr_date_dt = datetime.strptime(curr_date, "%Y-%m-%d")
    before = curr_date_dt - timedelta(days=look_back_days)

    # Some indicators mandate a specific series_type; override the argument.
    _, required_series_type = supported_indicators[indicator]
    if required_series_type:
        series_type = required_series_type

    try:
        if indicator == "vwma":
            # Alpha Vantage doesn't have direct VWMA; it would need to be
            # calculated from raw OHLCV data.
            return f"## VWMA (Volume Weighted Moving Average) for {symbol}:\n\nVWMA calculation requires OHLCV data and is not directly available from Alpha Vantage API.\nThis indicator would need to be calculated from the raw stock data using volume-weighted price averaging.\n\n{indicator_descriptions.get('vwma', 'No description available.')}"

        # Dispatch table: indicator -> (Alpha Vantage function, extra params).
        # Replaces a long if/elif chain that repeated the same request shape.
        av_requests = {
            "close_50_sma": ("SMA", {"time_period": "50", "series_type": series_type}),
            "close_200_sma": ("SMA", {"time_period": "200", "series_type": series_type}),
            "close_10_ema": ("EMA", {"time_period": "10", "series_type": series_type}),
            "macd": ("MACD", {"series_type": series_type}),
            "macds": ("MACD", {"series_type": series_type}),
            "macdh": ("MACD", {"series_type": series_type}),
            "rsi": ("RSI", {"time_period": str(time_period), "series_type": series_type}),
            "boll": ("BBANDS", {"time_period": "20", "series_type": series_type}),
            "boll_ub": ("BBANDS", {"time_period": "20", "series_type": series_type}),
            "boll_lb": ("BBANDS", {"time_period": "20", "series_type": series_type}),
            # ATR is computed from high/low/close, so it takes no series_type.
            "atr": ("ATR", {"time_period": str(time_period)}),
        }
        av_function, extra_params = av_requests[indicator]
        request_params = {"symbol": symbol, "interval": interval, "datatype": "csv"}
        request_params.update(extra_params)
        data = _make_api_request(av_function, request_params)

        # Parse CSV data and extract values for the date range.
        lines = data.strip().split('\n')
        if len(lines) < 2:
            return f"Error: No data returned for {indicator}"

        header = [col.strip() for col in lines[0].split(',')]
        try:
            date_col_idx = header.index('time')
        except ValueError:
            return f"Error: 'time' column not found in data for {indicator}. Available columns: {header}"

        # Map internal indicator names to expected CSV column names from Alpha Vantage.
        col_name_map = {
            "macd": "MACD", "macds": "MACD_Signal", "macdh": "MACD_Hist",
            "boll": "Real Middle Band", "boll_ub": "Real Upper Band", "boll_lb": "Real Lower Band",
            "rsi": "RSI", "atr": "ATR", "close_10_ema": "EMA",
            "close_50_sma": "SMA", "close_200_sma": "SMA"
        }
        target_col_name = col_name_map.get(indicator)
        if not target_col_name:
            # Default to the second column if no specific mapping exists.
            value_col_idx = 1
        else:
            try:
                value_col_idx = header.index(target_col_name)
            except ValueError:
                return f"Error: Column '{target_col_name}' not found for indicator '{indicator}'. Available columns: {header}"

        result_data = []
        for line in lines[1:]:
            if not line.strip():
                continue
            values = line.split(',')
            if len(values) > value_col_idx:
                try:
                    date_dt = datetime.strptime(values[date_col_idx].strip(), "%Y-%m-%d")
                    # Keep only rows inside the requested window.
                    if before <= date_dt <= curr_date_dt:
                        result_data.append((date_dt, values[value_col_idx].strip()))
                except (ValueError, IndexError):
                    # Skip malformed rows rather than abort the whole report.
                    continue

        # Sort chronologically and format the output.
        result_data.sort(key=lambda x: x[0])

        ind_string = "".join(
            f"{date_dt.strftime('%Y-%m-%d')}: {value}\n" for date_dt, value in result_data
        )
        if not ind_string:
            ind_string = "No data available for the specified date range.\n"

        return (
            f"## {indicator.upper()} values from {before.strftime('%Y-%m-%d')} to {curr_date}:\n\n"
            + ind_string
            + "\n\n"
            + indicator_descriptions.get(indicator, "No description available.")
        )

    except Exception as e:
        print(f"Error getting Alpha Vantage indicator data for {indicator}: {e}")
        return f"Error retrieving {indicator} data: {str(e)}"
|
||||
|
|
|
|||
|
|
@ -1,71 +1,71 @@
|
|||
from .alpha_vantage_common import _make_api_request, format_datetime_for_api
|
||||
|
||||
def get_news(ticker, start_date, end_date) -> dict[str, str] | str:
    """Fetch ticker-scoped market news & sentiment from Alpha Vantage.

    Queries the NEWS_SENTIMENT endpoint, which aggregates premier news
    outlets covering stocks, cryptocurrencies, forex, and macro topics
    (fiscal policy, mergers & acquisitions, IPOs, ...).

    Args:
        ticker: Stock symbol whose news articles are requested.
        start_date: Start of the search window.
        end_date: End of the search window.

    Returns:
        Parsed news-sentiment payload (dict) or the raw JSON string.
    """
    window_start = format_datetime_for_api(start_date)
    window_end = format_datetime_for_api(end_date)

    return _make_api_request(
        "NEWS_SENTIMENT",
        {"tickers": ticker, "time_from": window_start, "time_to": window_end},
    )
|
||||
|
||||
def get_global_news(curr_date, look_back_days: int = 7, limit: int = 50) -> dict[str, str] | str:
    """Fetch broad-market news & sentiment without ticker filtering.

    Requests macro topics (financial markets, macro economy, monetary
    policy) from the Alpha Vantage NEWS_SENTIMENT endpoint.

    Args:
        curr_date: Current date in yyyy-mm-dd format.
        look_back_days: Size of the look-back window in days (default 7).
        limit: Maximum number of articles to request (default 50).

    Returns:
        Parsed news-sentiment payload (dict) or the raw JSON string.
    """
    from datetime import datetime, timedelta

    # Walk back from curr_date to derive the start of the window.
    window_end = datetime.strptime(curr_date, "%Y-%m-%d")
    window_start = (window_end - timedelta(days=look_back_days)).strftime("%Y-%m-%d")

    return _make_api_request(
        "NEWS_SENTIMENT",
        {
            "topics": "financial_markets,economy_macro,economy_monetary",
            "time_from": format_datetime_for_api(window_start),
            "time_to": format_datetime_for_api(curr_date),
            "limit": str(limit),
        },
    )
|
||||
|
||||
|
||||
def get_insider_transactions(symbol: str) -> dict[str, str] | str:
|
||||
"""Returns latest and historical insider transactions by key stakeholders.
|
||||
|
||||
Covers transactions by founders, executives, board members, etc.
|
||||
|
||||
Args:
|
||||
symbol: Ticker symbol. Example: "IBM".
|
||||
|
||||
Returns:
|
||||
Dictionary containing insider transaction data or JSON string.
|
||||
"""
|
||||
|
||||
params = {
|
||||
"symbol": symbol,
|
||||
}
|
||||
|
||||
from .alpha_vantage_common import _make_api_request, format_datetime_for_api
|
||||
|
||||
def get_news(ticker, start_date, end_date) -> dict[str, str] | str:
    """Fetch ticker-scoped market news & sentiment from Alpha Vantage.

    Queries the NEWS_SENTIMENT endpoint, which aggregates premier news
    outlets covering stocks, cryptocurrencies, forex, and macro topics
    (fiscal policy, mergers & acquisitions, IPOs, ...).

    Args:
        ticker: Stock symbol whose news articles are requested.
        start_date: Start of the search window.
        end_date: End of the search window.

    Returns:
        Parsed news-sentiment payload (dict) or the raw JSON string.
    """
    window_start = format_datetime_for_api(start_date)
    window_end = format_datetime_for_api(end_date)

    return _make_api_request(
        "NEWS_SENTIMENT",
        {"tickers": ticker, "time_from": window_start, "time_to": window_end},
    )
|
||||
|
||||
def get_global_news(curr_date, look_back_days: int = 7, limit: int = 50) -> dict[str, str] | str:
    """Fetch broad-market news & sentiment without ticker filtering.

    Requests macro topics (financial markets, macro economy, monetary
    policy) from the Alpha Vantage NEWS_SENTIMENT endpoint.

    Args:
        curr_date: Current date in yyyy-mm-dd format.
        look_back_days: Size of the look-back window in days (default 7).
        limit: Maximum number of articles to request (default 50).

    Returns:
        Parsed news-sentiment payload (dict) or the raw JSON string.
    """
    from datetime import datetime, timedelta

    # Walk back from curr_date to derive the start of the window.
    window_end = datetime.strptime(curr_date, "%Y-%m-%d")
    window_start = (window_end - timedelta(days=look_back_days)).strftime("%Y-%m-%d")

    return _make_api_request(
        "NEWS_SENTIMENT",
        {
            "topics": "financial_markets,economy_macro,economy_monetary",
            "time_from": format_datetime_for_api(window_start),
            "time_to": format_datetime_for_api(curr_date),
            "limit": str(limit),
        },
    )
|
||||
|
||||
|
||||
def get_insider_transactions(symbol: str) -> dict[str, str] | str:
    """Fetch latest and historical insider transactions for *symbol*.

    Covers trades by founders, executives, board members, and other key
    stakeholders, as reported by Alpha Vantage.

    Args:
        symbol: Ticker symbol. Example: "IBM".

    Returns:
        Parsed insider-transaction payload (dict) or the raw JSON string.
    """
    return _make_api_request("INSIDER_TRANSACTIONS", {"symbol": symbol})
|
||||
|
|
@ -1,38 +1,38 @@
|
|||
from datetime import datetime
|
||||
from .alpha_vantage_common import _make_api_request, _filter_csv_by_date_range
|
||||
|
||||
def get_stock(
|
||||
symbol: str,
|
||||
start_date: str,
|
||||
end_date: str
|
||||
) -> str:
|
||||
"""
|
||||
Returns raw daily OHLCV values, adjusted close values, and historical split/dividend events
|
||||
filtered to the specified date range.
|
||||
|
||||
Args:
|
||||
symbol: The name of the equity. For example: symbol=IBM
|
||||
start_date: Start date in yyyy-mm-dd format
|
||||
end_date: End date in yyyy-mm-dd format
|
||||
|
||||
Returns:
|
||||
CSV string containing the daily adjusted time series data filtered to the date range.
|
||||
"""
|
||||
# Parse dates to determine the range
|
||||
start_dt = datetime.strptime(start_date, "%Y-%m-%d")
|
||||
today = datetime.now()
|
||||
|
||||
# Choose outputsize based on whether the requested range is within the latest 100 days
|
||||
# Compact returns latest 100 data points, so check if start_date is recent enough
|
||||
days_from_today_to_start = (today - start_dt).days
|
||||
outputsize = "compact" if days_from_today_to_start < 100 else "full"
|
||||
|
||||
params = {
|
||||
"symbol": symbol,
|
||||
"outputsize": outputsize,
|
||||
"datatype": "csv",
|
||||
}
|
||||
|
||||
response = _make_api_request("TIME_SERIES_DAILY_ADJUSTED", params)
|
||||
|
||||
from datetime import datetime
|
||||
from .alpha_vantage_common import _make_api_request, _filter_csv_by_date_range
|
||||
|
||||
def get_stock(
    symbol: str,
    start_date: str,
    end_date: str
) -> str:
    """Fetch daily adjusted OHLCV data for *symbol*, filtered to a date range.

    The payload includes adjusted close values and historical
    split/dividend events.

    Args:
        symbol: The name of the equity. For example: symbol=IBM
        start_date: Start date in yyyy-mm-dd format
        end_date: End date in yyyy-mm-dd format

    Returns:
        CSV string with the daily adjusted time series restricted to
        [start_date, end_date].
    """
    # "compact" returns only the latest 100 data points, so it is safe to
    # request it when the window starts within the last 100 calendar days;
    # otherwise pull the full history.
    window_age_days = (datetime.now() - datetime.strptime(start_date, "%Y-%m-%d")).days
    size = "compact" if window_age_days < 100 else "full"

    csv_payload = _make_api_request(
        "TIME_SERIES_DAILY_ADJUSTED",
        {"symbol": symbol, "outputsize": size, "datatype": "csv"},
    )
    return _filter_csv_by_date_range(csv_payload, start_date, end_date)
|
||||
|
|
@ -1,31 +1,31 @@
|
|||
import tradingagents.default_config as default_config
|
||||
from typing import Dict, Optional
|
||||
|
||||
# Use default config but allow it to be overridden
|
||||
_config: Optional[Dict] = None
|
||||
|
||||
|
||||
def initialize_config():
    """Lazily seed the module-level configuration from DEFAULT_CONFIG.

    No-op when the configuration has already been initialized.
    """
    global _config
    if _config is not None:
        return
    _config = default_config.DEFAULT_CONFIG.copy()
|
||||
|
||||
|
||||
def set_config(config: Dict):
    """Merge *config* into the active configuration.

    Seeds the configuration from defaults first if it has not been
    initialized yet; keys present in *config* override existing entries.
    """
    global _config
    initialize_config()
    _config.update(config)
|
||||
|
||||
|
||||
def get_config() -> Dict:
    """Return a shallow copy of the active configuration.

    Initializes the configuration from defaults on first access. A copy
    is returned so callers cannot mutate the shared state in place.
    """
    initialize_config()
    return dict(_config)
|
||||
|
||||
|
||||
# Initialize with default config
|
||||
initialize_config()
|
||||
import tradingagents.default_config as default_config
|
||||
from typing import Dict, Optional
|
||||
|
||||
# Use default config but allow it to be overridden
|
||||
_config: Optional[Dict] = None
|
||||
|
||||
|
||||
def initialize_config():
    """Lazily seed the module-level configuration from DEFAULT_CONFIG.

    No-op when the configuration has already been initialized.
    """
    global _config
    if _config is not None:
        return
    _config = default_config.DEFAULT_CONFIG.copy()
|
||||
|
||||
|
||||
def set_config(config: Dict):
    """Merge *config* into the active configuration.

    Seeds the configuration from defaults first if it has not been
    initialized yet; keys present in *config* override existing entries.
    """
    global _config
    initialize_config()
    _config.update(config)
|
||||
|
||||
|
||||
def get_config() -> Dict:
    """Return a shallow copy of the active configuration.

    Initializes the configuration from defaults on first access. A copy
    is returned so callers cannot mutate the shared state in place.
    """
    initialize_config()
    return dict(_config)
|
||||
|
||||
|
||||
# Initialize with default config
|
||||
initialize_config()
|
||||
|
|
|
|||
|
|
@ -1,162 +1,162 @@
|
|||
from typing import Annotated
|
||||
|
||||
# Import from vendor-specific modules
|
||||
from .y_finance import (
|
||||
get_YFin_data_online,
|
||||
get_stock_stats_indicators_window,
|
||||
get_fundamentals as get_yfinance_fundamentals,
|
||||
get_balance_sheet as get_yfinance_balance_sheet,
|
||||
get_cashflow as get_yfinance_cashflow,
|
||||
get_income_statement as get_yfinance_income_statement,
|
||||
get_insider_transactions as get_yfinance_insider_transactions,
|
||||
)
|
||||
from .yfinance_news import get_news_yfinance, get_global_news_yfinance
|
||||
from .alpha_vantage import (
|
||||
get_stock as get_alpha_vantage_stock,
|
||||
get_indicator as get_alpha_vantage_indicator,
|
||||
get_fundamentals as get_alpha_vantage_fundamentals,
|
||||
get_balance_sheet as get_alpha_vantage_balance_sheet,
|
||||
get_cashflow as get_alpha_vantage_cashflow,
|
||||
get_income_statement as get_alpha_vantage_income_statement,
|
||||
get_insider_transactions as get_alpha_vantage_insider_transactions,
|
||||
get_news as get_alpha_vantage_news,
|
||||
get_global_news as get_alpha_vantage_global_news,
|
||||
)
|
||||
from .alpha_vantage_common import AlphaVantageRateLimitError
|
||||
|
||||
# Configuration and routing logic
|
||||
from .config import get_config
|
||||
|
||||
# Tools organized by category
|
||||
TOOLS_CATEGORIES = {
|
||||
"core_stock_apis": {
|
||||
"description": "OHLCV stock price data",
|
||||
"tools": [
|
||||
"get_stock_data"
|
||||
]
|
||||
},
|
||||
"technical_indicators": {
|
||||
"description": "Technical analysis indicators",
|
||||
"tools": [
|
||||
"get_indicators"
|
||||
]
|
||||
},
|
||||
"fundamental_data": {
|
||||
"description": "Company fundamentals",
|
||||
"tools": [
|
||||
"get_fundamentals",
|
||||
"get_balance_sheet",
|
||||
"get_cashflow",
|
||||
"get_income_statement"
|
||||
]
|
||||
},
|
||||
"news_data": {
|
||||
"description": "News and insider data",
|
||||
"tools": [
|
||||
"get_news",
|
||||
"get_global_news",
|
||||
"get_insider_transactions",
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
VENDOR_LIST = [
|
||||
"yfinance",
|
||||
"alpha_vantage",
|
||||
]
|
||||
|
||||
# Mapping of methods to their vendor-specific implementations
|
||||
VENDOR_METHODS = {
|
||||
# core_stock_apis
|
||||
"get_stock_data": {
|
||||
"alpha_vantage": get_alpha_vantage_stock,
|
||||
"yfinance": get_YFin_data_online,
|
||||
},
|
||||
# technical_indicators
|
||||
"get_indicators": {
|
||||
"alpha_vantage": get_alpha_vantage_indicator,
|
||||
"yfinance": get_stock_stats_indicators_window,
|
||||
},
|
||||
# fundamental_data
|
||||
"get_fundamentals": {
|
||||
"alpha_vantage": get_alpha_vantage_fundamentals,
|
||||
"yfinance": get_yfinance_fundamentals,
|
||||
},
|
||||
"get_balance_sheet": {
|
||||
"alpha_vantage": get_alpha_vantage_balance_sheet,
|
||||
"yfinance": get_yfinance_balance_sheet,
|
||||
},
|
||||
"get_cashflow": {
|
||||
"alpha_vantage": get_alpha_vantage_cashflow,
|
||||
"yfinance": get_yfinance_cashflow,
|
||||
},
|
||||
"get_income_statement": {
|
||||
"alpha_vantage": get_alpha_vantage_income_statement,
|
||||
"yfinance": get_yfinance_income_statement,
|
||||
},
|
||||
# news_data
|
||||
"get_news": {
|
||||
"alpha_vantage": get_alpha_vantage_news,
|
||||
"yfinance": get_news_yfinance,
|
||||
},
|
||||
"get_global_news": {
|
||||
"yfinance": get_global_news_yfinance,
|
||||
"alpha_vantage": get_alpha_vantage_global_news,
|
||||
},
|
||||
"get_insider_transactions": {
|
||||
"alpha_vantage": get_alpha_vantage_insider_transactions,
|
||||
"yfinance": get_yfinance_insider_transactions,
|
||||
},
|
||||
}
|
||||
|
||||
def get_category_for_method(method: str) -> str:
    """Return the TOOLS_CATEGORIES key whose tool list contains *method*.

    Raises:
        ValueError: If no category lists the method.
    """
    matches = (name for name, meta in TOOLS_CATEGORIES.items() if method in meta["tools"])
    category = next(matches, None)
    if category is None:
        raise ValueError(f"Method '{method}' not found in any category")
    return category
|
||||
|
||||
def get_vendor(category: str, method: str = None) -> str:
    """Resolve the configured vendor string for a data category or tool.

    Tool-level overrides ("tool_vendors") take precedence over the
    category-level setting ("data_vendors"); unknown categories resolve
    to "default".
    """
    config = get_config()

    # Tool-level configuration wins when a method is given.
    overrides = config.get("tool_vendors", {})
    if method and method in overrides:
        return overrides[method]

    return config.get("data_vendors", {}).get(category, "default")
|
||||
|
||||
def route_to_vendor(method: str, *args, **kwargs):
|
||||
"""Route method calls to appropriate vendor implementation with fallback support."""
|
||||
category = get_category_for_method(method)
|
||||
vendor_config = get_vendor(category, method)
|
||||
primary_vendors = [v.strip() for v in vendor_config.split(',')]
|
||||
|
||||
if method not in VENDOR_METHODS:
|
||||
raise ValueError(f"Method '{method}' not supported")
|
||||
|
||||
# Build fallback chain: primary vendors first, then remaining available vendors
|
||||
all_available_vendors = list(VENDOR_METHODS[method].keys())
|
||||
fallback_vendors = primary_vendors.copy()
|
||||
for vendor in all_available_vendors:
|
||||
if vendor not in fallback_vendors:
|
||||
fallback_vendors.append(vendor)
|
||||
|
||||
for vendor in fallback_vendors:
|
||||
if vendor not in VENDOR_METHODS[method]:
|
||||
continue
|
||||
|
||||
vendor_impl = VENDOR_METHODS[method][vendor]
|
||||
impl_func = vendor_impl[0] if isinstance(vendor_impl, list) else vendor_impl
|
||||
|
||||
try:
|
||||
return impl_func(*args, **kwargs)
|
||||
except AlphaVantageRateLimitError:
|
||||
continue # Only rate limits trigger fallback
|
||||
|
||||
from typing import Annotated
|
||||
|
||||
# Import from vendor-specific modules
|
||||
from .y_finance import (
|
||||
get_YFin_data_online,
|
||||
get_stock_stats_indicators_window,
|
||||
get_fundamentals as get_yfinance_fundamentals,
|
||||
get_balance_sheet as get_yfinance_balance_sheet,
|
||||
get_cashflow as get_yfinance_cashflow,
|
||||
get_income_statement as get_yfinance_income_statement,
|
||||
get_insider_transactions as get_yfinance_insider_transactions,
|
||||
)
|
||||
from .yfinance_news import get_news_yfinance, get_global_news_yfinance
|
||||
from .alpha_vantage import (
|
||||
get_stock as get_alpha_vantage_stock,
|
||||
get_indicator as get_alpha_vantage_indicator,
|
||||
get_fundamentals as get_alpha_vantage_fundamentals,
|
||||
get_balance_sheet as get_alpha_vantage_balance_sheet,
|
||||
get_cashflow as get_alpha_vantage_cashflow,
|
||||
get_income_statement as get_alpha_vantage_income_statement,
|
||||
get_insider_transactions as get_alpha_vantage_insider_transactions,
|
||||
get_news as get_alpha_vantage_news,
|
||||
get_global_news as get_alpha_vantage_global_news,
|
||||
)
|
||||
from .alpha_vantage_common import AlphaVantageRateLimitError
|
||||
|
||||
# Configuration and routing logic
|
||||
from .config import get_config
|
||||
|
||||
# Tools organized by category
|
||||
TOOLS_CATEGORIES = {
|
||||
"core_stock_apis": {
|
||||
"description": "OHLCV stock price data",
|
||||
"tools": [
|
||||
"get_stock_data"
|
||||
]
|
||||
},
|
||||
"technical_indicators": {
|
||||
"description": "Technical analysis indicators",
|
||||
"tools": [
|
||||
"get_indicators"
|
||||
]
|
||||
},
|
||||
"fundamental_data": {
|
||||
"description": "Company fundamentals",
|
||||
"tools": [
|
||||
"get_fundamentals",
|
||||
"get_balance_sheet",
|
||||
"get_cashflow",
|
||||
"get_income_statement"
|
||||
]
|
||||
},
|
||||
"news_data": {
|
||||
"description": "News and insider data",
|
||||
"tools": [
|
||||
"get_news",
|
||||
"get_global_news",
|
||||
"get_insider_transactions",
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
VENDOR_LIST = [
|
||||
"yfinance",
|
||||
"alpha_vantage",
|
||||
]
|
||||
|
||||
# Mapping of methods to their vendor-specific implementations
|
||||
VENDOR_METHODS = {
|
||||
# core_stock_apis
|
||||
"get_stock_data": {
|
||||
"alpha_vantage": get_alpha_vantage_stock,
|
||||
"yfinance": get_YFin_data_online,
|
||||
},
|
||||
# technical_indicators
|
||||
"get_indicators": {
|
||||
"alpha_vantage": get_alpha_vantage_indicator,
|
||||
"yfinance": get_stock_stats_indicators_window,
|
||||
},
|
||||
# fundamental_data
|
||||
"get_fundamentals": {
|
||||
"alpha_vantage": get_alpha_vantage_fundamentals,
|
||||
"yfinance": get_yfinance_fundamentals,
|
||||
},
|
||||
"get_balance_sheet": {
|
||||
"alpha_vantage": get_alpha_vantage_balance_sheet,
|
||||
"yfinance": get_yfinance_balance_sheet,
|
||||
},
|
||||
"get_cashflow": {
|
||||
"alpha_vantage": get_alpha_vantage_cashflow,
|
||||
"yfinance": get_yfinance_cashflow,
|
||||
},
|
||||
"get_income_statement": {
|
||||
"alpha_vantage": get_alpha_vantage_income_statement,
|
||||
"yfinance": get_yfinance_income_statement,
|
||||
},
|
||||
# news_data
|
||||
"get_news": {
|
||||
"alpha_vantage": get_alpha_vantage_news,
|
||||
"yfinance": get_news_yfinance,
|
||||
},
|
||||
"get_global_news": {
|
||||
"yfinance": get_global_news_yfinance,
|
||||
"alpha_vantage": get_alpha_vantage_global_news,
|
||||
},
|
||||
"get_insider_transactions": {
|
||||
"alpha_vantage": get_alpha_vantage_insider_transactions,
|
||||
"yfinance": get_yfinance_insider_transactions,
|
||||
},
|
||||
}
|
||||
|
||||
def get_category_for_method(method: str) -> str:
    """Return the TOOLS_CATEGORIES key whose tool list contains *method*.

    Raises:
        ValueError: If no category lists the method.
    """
    matches = (name for name, meta in TOOLS_CATEGORIES.items() if method in meta["tools"])
    category = next(matches, None)
    if category is None:
        raise ValueError(f"Method '{method}' not found in any category")
    return category
|
||||
|
||||
def get_vendor(category: str, method: str = None) -> str:
    """Resolve the configured vendor string for a data category or tool.

    Tool-level overrides ("tool_vendors") take precedence over the
    category-level setting ("data_vendors"); unknown categories resolve
    to "default".
    """
    config = get_config()

    # Tool-level configuration wins when a method is given.
    overrides = config.get("tool_vendors", {})
    if method and method in overrides:
        return overrides[method]

    return config.get("data_vendors", {}).get(category, "default")
|
||||
|
||||
def route_to_vendor(method: str, *args, **kwargs):
    """Dispatch *method* to its configured vendor implementation.

    Builds a fallback chain — configured vendors first, then every other
    vendor that implements the method — and tries each in order. Only
    Alpha Vantage rate-limit errors trigger fallback to the next vendor;
    any other exception propagates to the caller.

    Raises:
        ValueError: If the method is not registered (via
            get_category_for_method or VENDOR_METHODS).
        RuntimeError: If every candidate vendor was exhausted.
    """
    category = get_category_for_method(method)
    configured = [v.strip() for v in get_vendor(category, method).split(',')]

    if method not in VENDOR_METHODS:
        raise ValueError(f"Method '{method}' not supported")

    implementations = VENDOR_METHODS[method]

    # Configured vendors first, then any remaining implementers (deduped).
    chain = list(configured)
    chain.extend(v for v in implementations if v not in chain)

    for candidate in chain:
        if candidate not in implementations:
            continue

        impl = implementations[candidate]
        func = impl[0] if isinstance(impl, list) else impl

        try:
            return func(*args, **kwargs)
        except AlphaVantageRateLimitError:
            continue  # Only rate limits trigger fallback

    raise RuntimeError(f"No available vendor for '{method}'")
|
||||
|
|
@ -1,64 +1,64 @@
|
|||
import pandas as pd
|
||||
import yfinance as yf
|
||||
from stockstats import wrap
|
||||
from typing import Annotated
|
||||
import os
|
||||
from .config import get_config
|
||||
|
||||
|
||||
class StockstatsUtils:
    """Compute technical indicators with stockstats over cached yfinance data."""

    @staticmethod
    def get_stock_stats(
        symbol: Annotated[str, "ticker symbol for the company"],
        indicator: Annotated[
            str, "quantitative indicators based off of the stock data for the company"
        ],
        curr_date: Annotated[
            str, "curr date for retrieving stock price data, YYYY-mm-dd"
        ],
    ):
        """Return the value of *indicator* for *symbol* on *curr_date*.

        Downloads (or reads from the local CSV cache) up to 15 years of
        daily price history ending today, lets stockstats derive the
        indicator column on demand, and returns its value on the
        requested date.

        Returns:
            The indicator value for curr_date, or the string
            "N/A: Not a trading day (weekend or holiday)" when the price
            history has no row for that date.
        """
        config = get_config()

        today_date = pd.Timestamp.today()
        curr_date_dt = pd.to_datetime(curr_date)

        # Fixed 15-year window ending today, so the cache-file name below
        # depends only on the symbol and today's date.
        end_date = today_date
        start_date = today_date - pd.DateOffset(years=15)
        start_date_str = start_date.strftime("%Y-%m-%d")
        end_date_str = end_date.strftime("%Y-%m-%d")

        # Ensure cache directory exists
        os.makedirs(config["data_cache_dir"], exist_ok=True)

        data_file = os.path.join(
            config["data_cache_dir"],
            f"{symbol}-YFin-data-{start_date_str}-{end_date_str}.csv",
        )

        if os.path.exists(data_file):
            # Cache hit: the CSV round-trip loses the datetime dtype, so
            # restore it explicitly before formatting below.
            data = pd.read_csv(data_file)
            data["Date"] = pd.to_datetime(data["Date"])
        else:
            # Cache miss: download from Yahoo Finance and persist for reuse.
            data = yf.download(
                symbol,
                start=start_date_str,
                end=end_date_str,
                multi_level_index=False,
                progress=False,
                auto_adjust=True,
            )
            data = data.reset_index()
            data.to_csv(data_file, index=False)

        # Wrap with stockstats so indicator columns are computed lazily.
        df = wrap(data)
        df["Date"] = df["Date"].dt.strftime("%Y-%m-%d")
        curr_date_str = curr_date_dt.strftime("%Y-%m-%d")

        df[indicator]  # trigger stockstats to calculate the indicator
        matching_rows = df[df["Date"].str.startswith(curr_date_str)]

        if not matching_rows.empty:
            indicator_value = matching_rows[indicator].values[0]
            return indicator_value
        else:
            return "N/A: Not a trading day (weekend or holiday)"
|
||||
import pandas as pd
|
||||
import yfinance as yf
|
||||
from stockstats import wrap
|
||||
from typing import Annotated
|
||||
import os
|
||||
from .config import get_config
|
||||
|
||||
|
||||
class StockstatsUtils:
    """Compute technical indicators with stockstats over cached yfinance data."""

    @staticmethod
    def get_stock_stats(
        symbol: Annotated[str, "ticker symbol for the company"],
        indicator: Annotated[
            str, "quantitative indicators based off of the stock data for the company"
        ],
        curr_date: Annotated[
            str, "curr date for retrieving stock price data, YYYY-mm-dd"
        ],
    ):
        """Return the value of *indicator* for *symbol* on *curr_date*.

        Downloads (or reads from the local CSV cache) up to 15 years of
        daily price history ending today, lets stockstats derive the
        indicator column on demand, and returns its value on the
        requested date.

        Returns:
            The indicator value for curr_date, or the string
            "N/A: Not a trading day (weekend or holiday)" when the price
            history has no row for that date.
        """
        config = get_config()

        today_date = pd.Timestamp.today()
        curr_date_dt = pd.to_datetime(curr_date)

        # Fixed 15-year window ending today, so the cache-file name below
        # depends only on the symbol and today's date.
        end_date = today_date
        start_date = today_date - pd.DateOffset(years=15)
        start_date_str = start_date.strftime("%Y-%m-%d")
        end_date_str = end_date.strftime("%Y-%m-%d")

        # Ensure cache directory exists
        os.makedirs(config["data_cache_dir"], exist_ok=True)

        data_file = os.path.join(
            config["data_cache_dir"],
            f"{symbol}-YFin-data-{start_date_str}-{end_date_str}.csv",
        )

        if os.path.exists(data_file):
            # Cache hit: the CSV round-trip loses the datetime dtype, so
            # restore it explicitly before formatting below.
            data = pd.read_csv(data_file)
            data["Date"] = pd.to_datetime(data["Date"])
        else:
            # Cache miss: download from Yahoo Finance and persist for reuse.
            data = yf.download(
                symbol,
                start=start_date_str,
                end=end_date_str,
                multi_level_index=False,
                progress=False,
                auto_adjust=True,
            )
            data = data.reset_index()
            data.to_csv(data_file, index=False)

        # Wrap with stockstats so indicator columns are computed lazily.
        df = wrap(data)
        df["Date"] = df["Date"].dt.strftime("%Y-%m-%d")
        curr_date_str = curr_date_dt.strftime("%Y-%m-%d")

        df[indicator]  # trigger stockstats to calculate the indicator
        matching_rows = df[df["Date"].str.startswith(curr_date_str)]

        if not matching_rows.empty:
            indicator_value = matching_rows[indicator].values[0]
            return indicator_value
        else:
            return "N/A: Not a trading day (weekend or holiday)"
|
||||
|
|
|
|||
|
|
@ -1,39 +1,39 @@
|
|||
import os
|
||||
import json
|
||||
import pandas as pd
|
||||
from datetime import date, timedelta, datetime
|
||||
from typing import Annotated
|
||||
|
||||
SavePathType = Annotated[str, "File path to save data. If None, data is not saved."]
|
||||
|
||||
def save_output(data: pd.DataFrame, tag: str, save_path: SavePathType = None) -> None:
    """Write *data* to CSV at *save_path*; no-op when the path is falsy."""
    if not save_path:
        return
    data.to_csv(save_path)
    print(f"{tag} saved to {save_path}")
|
||||
|
||||
|
||||
def get_current_date():
    """Return today's date as a yyyy-mm-dd string."""
    return date.today().isoformat()
|
||||
|
||||
|
||||
def decorate_all_methods(decorator):
    """Class-decorator factory: apply *decorator* to every function a class defines.

    Only plain functions in the class body are wrapped. The previous
    ``callable(attr_value)`` check also matched nested classes (and, on
    Python 3.10+, staticmethod objects), replacing them with wrapped
    plain functions and breaking their semantics; ``inspect.isfunction``
    restricts wrapping to actual methods.

    Args:
        decorator: A function decorator applied to each method.

    Returns:
        A class decorator that rebinds the class's methods in place and
        returns the class.
    """
    import inspect

    def class_decorator(cls):
        for attr_name, attr_value in cls.__dict__.items():
            # isfunction() excludes nested classes and descriptor objects
            # that callable() would incorrectly match.
            if inspect.isfunction(attr_value):
                setattr(cls, attr_name, decorator(attr_value))
        return cls

    return class_decorator
|
||||
|
||||
|
||||
def get_next_weekday(date):
    """Return *date* unchanged if it falls on a weekday, else the next Monday.

    Accepts either a datetime or a yyyy-mm-dd string (parsed to datetime).
    """
    if not isinstance(date, datetime):
        date = datetime.strptime(date, "%Y-%m-%d")

    weekday = date.weekday()
    if weekday < 5:
        return date

    # Saturday (5) -> +2 days, Sunday (6) -> +1 day: land on Monday.
    return date + timedelta(days=7 - weekday)
|
||||
import os
|
||||
import json
|
||||
import pandas as pd
|
||||
from datetime import date, timedelta, datetime
|
||||
from typing import Annotated
|
||||
|
||||
SavePathType = Annotated[str, "File path to save data. If None, data is not saved."]
|
||||
|
||||
def save_output(data: pd.DataFrame, tag: str, save_path: SavePathType = None) -> None:
    """Write *data* to CSV at *save_path*; no-op when the path is falsy."""
    if not save_path:
        return
    data.to_csv(save_path)
    print(f"{tag} saved to {save_path}")
|
||||
|
||||
|
||||
def get_current_date():
    """Return today's date as a yyyy-mm-dd string."""
    return date.today().isoformat()
|
||||
|
||||
|
||||
def decorate_all_methods(decorator):
    """Class-decorator factory: apply *decorator* to every function a class defines.

    Only plain functions in the class body are wrapped. The previous
    ``callable(attr_value)`` check also matched nested classes (and, on
    Python 3.10+, staticmethod objects), replacing them with wrapped
    plain functions and breaking their semantics; ``inspect.isfunction``
    restricts wrapping to actual methods.

    Args:
        decorator: A function decorator applied to each method.

    Returns:
        A class decorator that rebinds the class's methods in place and
        returns the class.
    """
    import inspect

    def class_decorator(cls):
        for attr_name, attr_value in cls.__dict__.items():
            # isfunction() excludes nested classes and descriptor objects
            # that callable() would incorrectly match.
            if inspect.isfunction(attr_value):
                setattr(cls, attr_name, decorator(attr_value))
        return cls

    return class_decorator
|
||||
|
||||
|
||||
def get_next_weekday(date):
    """Return *date* unchanged if it falls on a weekday, else the next Monday.

    Accepts either a datetime or a yyyy-mm-dd string (parsed to datetime).
    """
    if not isinstance(date, datetime):
        date = datetime.strptime(date, "%Y-%m-%d")

    weekday = date.weekday()
    if weekday < 5:
        return date

    # Saturday (5) -> +2 days, Sunday (6) -> +1 day: land on Monday.
    return date + timedelta(days=7 - weekday)
|
||||
|
|
|
|||
File diff suppressed because it is too large
Load Diff
|
|
@ -1,190 +1,190 @@
|
|||
"""yfinance-based news data fetching functions."""
|
||||
|
||||
import yfinance as yf
|
||||
from datetime import datetime
|
||||
from dateutil.relativedelta import relativedelta
|
||||
|
||||
|
||||
def _extract_article_data(article: dict) -> dict:
|
||||
"""Extract article data from yfinance news format (handles nested 'content' structure)."""
|
||||
# Handle nested content structure
|
||||
if "content" in article:
|
||||
content = article["content"]
|
||||
title = content.get("title", "No title")
|
||||
summary = content.get("summary", "")
|
||||
provider = content.get("provider", {})
|
||||
publisher = provider.get("displayName", "Unknown")
|
||||
|
||||
# Get URL from canonicalUrl or clickThroughUrl
|
||||
url_obj = content.get("canonicalUrl") or content.get("clickThroughUrl") or {}
|
||||
link = url_obj.get("url", "")
|
||||
|
||||
# Get publish date
|
||||
pub_date_str = content.get("pubDate", "")
|
||||
pub_date = None
|
||||
if pub_date_str:
|
||||
try:
|
||||
pub_date = datetime.fromisoformat(pub_date_str.replace("Z", "+00:00"))
|
||||
except (ValueError, AttributeError):
|
||||
pass
|
||||
|
||||
return {
|
||||
"title": title,
|
||||
"summary": summary,
|
||||
"publisher": publisher,
|
||||
"link": link,
|
||||
"pub_date": pub_date,
|
||||
}
|
||||
else:
|
||||
# Fallback for flat structure
|
||||
return {
|
||||
"title": article.get("title", "No title"),
|
||||
"summary": article.get("summary", ""),
|
||||
"publisher": article.get("publisher", "Unknown"),
|
||||
"link": article.get("link", ""),
|
||||
"pub_date": None,
|
||||
}
|
||||
|
||||
|
||||
def get_news_yfinance(
    ticker: str,
    start_date: str,
    end_date: str,
) -> str:
    """
    Fetch recent news for *ticker* via yfinance and format it as markdown.

    Articles are kept when their publish date falls inside
    [start_date, end_date] (the end day is included in full); articles
    without a parseable publish date are kept unconditionally.

    Args:
        ticker: Stock ticker symbol (e.g., "AAPL")
        start_date: Start date in yyyy-mm-dd format
        end_date: End date in yyyy-mm-dd format

    Returns:
        Formatted string containing news articles, or an error/empty message.
    """
    try:
        articles = yf.Ticker(ticker).get_news(count=20)
        if not articles:
            return f"No news found for {ticker}"

        window_start = datetime.strptime(start_date, "%Y-%m-%d")
        window_end = datetime.strptime(end_date, "%Y-%m-%d")

        sections = []
        for raw in articles:
            info = _extract_article_data(raw)

            published = info["pub_date"]
            if published:
                # Compare naive datetimes; extend the bound a day so the
                # whole end date is included.
                naive = published.replace(tzinfo=None)
                if not (window_start <= naive <= window_end + relativedelta(days=1)):
                    continue

            block = f"### {info['title']} (source: {info['publisher']})\n"
            if info["summary"]:
                block += f"{info['summary']}\n"
            if info["link"]:
                block += f"Link: {info['link']}\n"
            sections.append(block + "\n")

        if not sections:
            return f"No news found for {ticker} between {start_date} and {end_date}"

        return f"## {ticker} News, from {start_date} to {end_date}:\n\n" + "".join(sections)

    except Exception as e:
        return f"Error fetching news for {ticker}: {str(e)}"
|
||||
|
||||
|
||||
def get_global_news_yfinance(
    curr_date: str,
    look_back_days: int = 7,
    limit: int = 10,
) -> str:
    """
    Retrieve global/macro economic news using yfinance Search.

    Runs a small set of fixed macro search queries, deduplicates articles by
    title, and stops as soon as ``limit`` unique articles are collected.

    Args:
        curr_date: Current date in yyyy-mm-dd format
        look_back_days: Number of days to look back (used only to label the
            reported date range; yfinance Search takes no date filter)
        limit: Maximum number of articles to return

    Returns:
        Formatted string containing global news articles, or an explanatory
        message when nothing was found or fetching failed.
    """
    # Search queries for macro/global news
    search_queries = [
        "stock market economy",
        "Federal Reserve interest rates",
        "inflation economic outlook",
        "global markets trading",
    ]

    all_news = []
    seen_titles = set()

    try:
        for query in search_queries:
            # Fix: the original only broke out of the inner article loop, so
            # remaining queries still hit the network after the budget was
            # already full. Stop searching entirely once we have `limit`.
            if len(all_news) >= limit:
                break

            search = yf.Search(
                query=query,
                news_count=limit,
                enable_fuzzy_query=True,
            )

            if search.news:
                for article in search.news:
                    # Handle both flat and nested structures
                    if "content" in article:
                        title = _extract_article_data(article)["title"]
                    else:
                        title = article.get("title", "")

                    # Deduplicate by title
                    if title and title not in seen_titles:
                        seen_titles.add(title)
                        all_news.append(article)

                    if len(all_news) >= limit:
                        break

        if not all_news:
            return f"No global news found for {curr_date}"

        # Calculate the labeled date range
        curr_dt = datetime.strptime(curr_date, "%Y-%m-%d")
        start_date = (curr_dt - relativedelta(days=look_back_days)).strftime("%Y-%m-%d")

        news_str = ""
        for article in all_news[:limit]:
            # Handle both flat and nested structures
            if "content" in article:
                data = _extract_article_data(article)
                title = data["title"]
                publisher = data["publisher"]
                link = data["link"]
                summary = data["summary"]
            else:
                title = article.get("title", "No title")
                publisher = article.get("publisher", "Unknown")
                link = article.get("link", "")
                summary = ""  # flat results carry no summary we extract

            news_str += f"### {title} (source: {publisher})\n"
            if summary:
                news_str += f"{summary}\n"
            if link:
                news_str += f"Link: {link}\n"
            news_str += "\n"

        return f"## Global Market News, from {start_date} to {curr_date}:\n\n{news_str}"

    except Exception as e:
        # Best-effort: degrade to an error string rather than raising.
        return f"Error fetching global news: {str(e)}"
|
||||
"""yfinance-based news data fetching functions."""
|
||||
|
||||
import yfinance as yf
|
||||
from datetime import datetime
|
||||
from dateutil.relativedelta import relativedelta
|
||||
|
||||
|
||||
def _extract_article_data(article: dict) -> dict:
|
||||
"""Extract article data from yfinance news format (handles nested 'content' structure)."""
|
||||
# Handle nested content structure
|
||||
if "content" in article:
|
||||
content = article["content"]
|
||||
title = content.get("title", "No title")
|
||||
summary = content.get("summary", "")
|
||||
provider = content.get("provider", {})
|
||||
publisher = provider.get("displayName", "Unknown")
|
||||
|
||||
# Get URL from canonicalUrl or clickThroughUrl
|
||||
url_obj = content.get("canonicalUrl") or content.get("clickThroughUrl") or {}
|
||||
link = url_obj.get("url", "")
|
||||
|
||||
# Get publish date
|
||||
pub_date_str = content.get("pubDate", "")
|
||||
pub_date = None
|
||||
if pub_date_str:
|
||||
try:
|
||||
pub_date = datetime.fromisoformat(pub_date_str.replace("Z", "+00:00"))
|
||||
except (ValueError, AttributeError):
|
||||
pass
|
||||
|
||||
return {
|
||||
"title": title,
|
||||
"summary": summary,
|
||||
"publisher": publisher,
|
||||
"link": link,
|
||||
"pub_date": pub_date,
|
||||
}
|
||||
else:
|
||||
# Fallback for flat structure
|
||||
return {
|
||||
"title": article.get("title", "No title"),
|
||||
"summary": article.get("summary", ""),
|
||||
"publisher": article.get("publisher", "Unknown"),
|
||||
"link": article.get("link", ""),
|
||||
"pub_date": None,
|
||||
}
|
||||
|
||||
|
||||
def get_news_yfinance(
    ticker: str,
    start_date: str,
    end_date: str,
) -> str:
    """
    Retrieve news for a specific stock ticker using yfinance.

    Articles are fetched via ``yf.Ticker(...).get_news`` and filtered to the
    inclusive [start_date, end_date] window when a publish timestamp is
    available; articles without a timestamp are kept.

    Args:
        ticker: Stock ticker symbol (e.g., "AAPL")
        start_date: Start date in yyyy-mm-dd format
        end_date: End date in yyyy-mm-dd format

    Returns:
        Formatted string containing news articles, or an explanatory
        message when nothing matched or fetching failed.
    """
    try:
        stock = yf.Ticker(ticker)
        news = stock.get_news(count=20)

        if not news:
            return f"No news found for {ticker}"

        # Parse date range for filtering. The window is inclusive of
        # end_date, so the exclusive upper bound is midnight of the next day.
        start_dt = datetime.strptime(start_date, "%Y-%m-%d")
        end_exclusive = datetime.strptime(end_date, "%Y-%m-%d") + relativedelta(days=1)

        sections = []
        for article in news:
            data = _extract_article_data(article)

            # Filter by date if publish time is available.
            if data["pub_date"]:
                pub_date_naive = data["pub_date"].replace(tzinfo=None)
                # Fix: strict upper bound — an article stamped exactly at
                # midnight after end_date was previously (incorrectly) kept.
                if not (start_dt <= pub_date_naive < end_exclusive):
                    continue

            block = f"### {data['title']} (source: {data['publisher']})\n"
            if data["summary"]:
                block += f"{data['summary']}\n"
            if data["link"]:
                block += f"Link: {data['link']}\n"
            sections.append(block)

        if not sections:
            return f"No news found for {ticker} between {start_date} and {end_date}"

        # Join once instead of repeated += (each section already ends with \n,
        # and articles are separated by a blank line, matching prior output).
        news_str = "\n".join(sections) + "\n"
        return f"## {ticker} News, from {start_date} to {end_date}:\n\n{news_str}"

    except Exception as e:
        # Best-effort: report the failure as text rather than raising,
        # matching the other dataflow fetchers in this module.
        return f"Error fetching news for {ticker}: {str(e)}"
|
||||
|
||||
|
||||
def get_global_news_yfinance(
    curr_date: str,
    look_back_days: int = 7,
    limit: int = 10,
) -> str:
    """
    Retrieve global/macro economic news using yfinance Search.

    Runs a small set of fixed macro search queries, deduplicates articles by
    title, and stops as soon as ``limit`` unique articles are collected.

    Args:
        curr_date: Current date in yyyy-mm-dd format
        look_back_days: Number of days to look back (used only to label the
            reported date range; yfinance Search takes no date filter)
        limit: Maximum number of articles to return

    Returns:
        Formatted string containing global news articles, or an explanatory
        message when nothing was found or fetching failed.
    """
    # Search queries for macro/global news
    search_queries = [
        "stock market economy",
        "Federal Reserve interest rates",
        "inflation economic outlook",
        "global markets trading",
    ]

    all_news = []
    seen_titles = set()

    try:
        for query in search_queries:
            # Fix: the original only broke out of the inner article loop, so
            # remaining queries still hit the network after the budget was
            # already full. Stop searching entirely once we have `limit`.
            if len(all_news) >= limit:
                break

            search = yf.Search(
                query=query,
                news_count=limit,
                enable_fuzzy_query=True,
            )

            if search.news:
                for article in search.news:
                    # Handle both flat and nested structures
                    if "content" in article:
                        title = _extract_article_data(article)["title"]
                    else:
                        title = article.get("title", "")

                    # Deduplicate by title
                    if title and title not in seen_titles:
                        seen_titles.add(title)
                        all_news.append(article)

                    if len(all_news) >= limit:
                        break

        if not all_news:
            return f"No global news found for {curr_date}"

        # Calculate the labeled date range
        curr_dt = datetime.strptime(curr_date, "%Y-%m-%d")
        start_date = (curr_dt - relativedelta(days=look_back_days)).strftime("%Y-%m-%d")

        news_str = ""
        for article in all_news[:limit]:
            # Handle both flat and nested structures
            if "content" in article:
                data = _extract_article_data(article)
                title = data["title"]
                publisher = data["publisher"]
                link = data["link"]
                summary = data["summary"]
            else:
                title = article.get("title", "No title")
                publisher = article.get("publisher", "Unknown")
                link = article.get("link", "")
                summary = ""  # flat results carry no summary we extract

            news_str += f"### {title} (source: {publisher})\n"
            if summary:
                news_str += f"{summary}\n"
            if link:
                news_str += f"Link: {link}\n"
            news_str += "\n"

        return f"## Global Market News, from {start_date} to {curr_date}:\n\n{news_str}"

    except Exception as e:
        # Best-effort: degrade to an error string rather than raising.
        return f"Error fetching global news: {str(e)}"
|
||||
|
|
|
|||
|
|
@ -1,9 +1,9 @@
|
|||
# TradingAgents/graph/__init__.py
|
||||
|
||||
from .trading_graph import TradingAgentsGraph
|
||||
from .setup import StructuredGraphSetup
|
||||
|
||||
__all__ = [
|
||||
"TradingAgentsGraph",
|
||||
"StructuredGraphSetup",
|
||||
]
|
||||
# TradingAgents/graph/__init__.py
|
||||
|
||||
from .trading_graph import TradingAgentsGraph
|
||||
from .setup import StructuredGraphSetup
|
||||
|
||||
__all__ = [
|
||||
"TradingAgentsGraph",
|
||||
"StructuredGraphSetup",
|
||||
]
|
||||
|
|
|
|||
|
|
@ -1,67 +1,67 @@
|
|||
# TradingAgents/graph/conditional_logic.py
|
||||
|
||||
from tradingagents.agents.utils.agent_states import AgentState
|
||||
|
||||
|
||||
class ConditionalLogic:
    """Handles conditional logic for determining graph flow.

    Each ``should_continue_*`` method inspects the current AgentState and
    returns the name of the next graph node to execute.
    """

    def __init__(self, max_debate_rounds=1, max_risk_discuss_rounds=1):
        """Initialize with configuration parameters.

        Args:
            max_debate_rounds: Turns each researcher (Bull/Bear) gets in the
                investment debate.
            max_risk_discuss_rounds: Turns each of the three risk analysts
                gets in the risk discussion.
        """
        self.max_debate_rounds = max_debate_rounds
        self.max_risk_discuss_rounds = max_risk_discuss_rounds

    def _route_tool_loop(self, state: AgentState, tools_node: str, clear_node: str) -> str:
        """Shared analyst routing: keep looping through the tool node while the
        last AI message still requests tool calls; otherwise proceed to the
        message-clear node."""
        last_message = state["messages"][-1]
        if last_message.tool_calls:
            return tools_node
        return clear_node

    def should_continue_market(self, state: AgentState):
        """Determine if market analysis should continue."""
        return self._route_tool_loop(state, "tools_market", "Msg Clear Market")

    def should_continue_social(self, state: AgentState):
        """Determine if social media analysis should continue."""
        return self._route_tool_loop(state, "tools_social", "Msg Clear Social")

    def should_continue_news(self, state: AgentState):
        """Determine if news analysis should continue."""
        return self._route_tool_loop(state, "tools_news", "Msg Clear News")

    def should_continue_fundamentals(self, state: AgentState):
        """Determine if fundamentals analysis should continue."""
        return self._route_tool_loop(state, "tools_fundamentals", "Msg Clear Fundamentals")

    def should_continue_debate(self, state: AgentState) -> str:
        """Determine if the Bull/Bear debate should continue.

        The debate ends after each of the 2 researchers has spoken
        ``max_debate_rounds`` times (``count`` grows by 1 per utterance).
        The original comment claimed "3 rounds"; the threshold is actually
        2 * max_debate_rounds total utterances.
        """
        if state["investment_debate_state"]["count"] >= 2 * self.max_debate_rounds:
            return "Research Manager"
        # Alternate speakers: whoever did NOT produce the latest response goes next.
        if state["investment_debate_state"]["current_response"].startswith("Bull"):
            return "Bear Researcher"
        return "Bull Researcher"

    def should_continue_risk_analysis(self, state: AgentState) -> str:
        """Determine if the three-way risk discussion should continue.

        The discussion ends after each of the 3 analysts has spoken
        ``max_risk_discuss_rounds`` times (3 * rounds total utterances).
        """
        if state["risk_debate_state"]["count"] >= 3 * self.max_risk_discuss_rounds:
            return "Risk Judge"
        # Fixed rotation: Aggressive -> Conservative -> Neutral -> Aggressive ...
        if state["risk_debate_state"]["latest_speaker"].startswith("Aggressive"):
            return "Conservative Analyst"
        if state["risk_debate_state"]["latest_speaker"].startswith("Conservative"):
            return "Neutral Analyst"
        return "Aggressive Analyst"
|
||||
# TradingAgents/graph/conditional_logic.py
|
||||
|
||||
from tradingagents.agents.utils.agent_states import AgentState
|
||||
|
||||
|
||||
class ConditionalLogic:
    """Handles conditional logic for determining graph flow.

    Each ``should_continue_*`` method inspects the current AgentState and
    returns the name of the next graph node to execute.
    """

    def __init__(self, max_debate_rounds=1, max_risk_discuss_rounds=1):
        """Initialize with configuration parameters.

        Args:
            max_debate_rounds: Turns each researcher (Bull/Bear) gets in the
                investment debate.
            max_risk_discuss_rounds: Turns each of the three risk analysts
                gets in the risk discussion.
        """
        self.max_debate_rounds = max_debate_rounds
        self.max_risk_discuss_rounds = max_risk_discuss_rounds

    def _route_tool_loop(self, state: AgentState, tools_node: str, clear_node: str) -> str:
        """Shared analyst routing: keep looping through the tool node while the
        last AI message still requests tool calls; otherwise proceed to the
        message-clear node."""
        last_message = state["messages"][-1]
        if last_message.tool_calls:
            return tools_node
        return clear_node

    def should_continue_market(self, state: AgentState):
        """Determine if market analysis should continue."""
        return self._route_tool_loop(state, "tools_market", "Msg Clear Market")

    def should_continue_social(self, state: AgentState):
        """Determine if social media analysis should continue."""
        return self._route_tool_loop(state, "tools_social", "Msg Clear Social")

    def should_continue_news(self, state: AgentState):
        """Determine if news analysis should continue."""
        return self._route_tool_loop(state, "tools_news", "Msg Clear News")

    def should_continue_fundamentals(self, state: AgentState):
        """Determine if fundamentals analysis should continue."""
        return self._route_tool_loop(state, "tools_fundamentals", "Msg Clear Fundamentals")

    def should_continue_debate(self, state: AgentState) -> str:
        """Determine if the Bull/Bear debate should continue.

        The debate ends after each of the 2 researchers has spoken
        ``max_debate_rounds`` times (``count`` grows by 1 per utterance).
        The original comment claimed "3 rounds"; the threshold is actually
        2 * max_debate_rounds total utterances.
        """
        if state["investment_debate_state"]["count"] >= 2 * self.max_debate_rounds:
            return "Research Manager"
        # Alternate speakers: whoever did NOT produce the latest response goes next.
        if state["investment_debate_state"]["current_response"].startswith("Bull"):
            return "Bear Researcher"
        return "Bull Researcher"

    def should_continue_risk_analysis(self, state: AgentState) -> str:
        """Determine if the three-way risk discussion should continue.

        The discussion ends after each of the 3 analysts has spoken
        ``max_risk_discuss_rounds`` times (3 * rounds total utterances).
        """
        if state["risk_debate_state"]["count"] >= 3 * self.max_risk_discuss_rounds:
            return "Risk Judge"
        # Fixed rotation: Aggressive -> Conservative -> Neutral -> Aggressive ...
        if state["risk_debate_state"]["latest_speaker"].startswith("Aggressive"):
            return "Conservative Analyst"
        if state["risk_debate_state"]["latest_speaker"].startswith("Conservative"):
            return "Neutral Analyst"
        return "Aggressive Analyst"
|
||||
|
|
|
|||
|
|
@ -1,222 +1,222 @@
|
|||
"""Parallel execution nodes for TradingAgents.
|
||||
|
||||
Provides parallel wrappers for:
|
||||
- Analyst phase (Market, Social, News, Fundamentals)
|
||||
- Research debate phase (Bull + Bear)
|
||||
- Risk debate phase (Aggressive + Conservative + Neutral)
|
||||
"""
|
||||
|
||||
import asyncio
|
||||
import logging
|
||||
|
||||
from langchain_core.messages import HumanMessage, RemoveMessage
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def create_parallel_analyst_node(analyst_fns, tool_nodes, selected_analysts):
    """Create a single LangGraph node that runs all analysts in parallel.

    Each analyst gets its own isolated message state and runs its complete
    tool-calling loop independently. Results are merged at the end.

    Args:
        analyst_fns: dict mapping analyst type (e.g. "market") to node function
        tool_nodes: dict mapping analyst type to ToolNode instance
        selected_analysts: list of analyst types to run
    """

    async def parallel_analysts_node(state):
        """Run all analysts concurrently and merge their reports."""

        async def run_single(analyst_type):
            """Run one analyst through its complete tool-calling loop."""
            fn = analyst_fns[analyst_type]
            tn = tool_nodes[analyst_type]

            # Each analyst gets its own isolated message state so concurrent
            # analysts cannot see each other's tool traffic.
            local_state = {
                "messages": list(state["messages"]),
                "trade_date": state["trade_date"],
                "company_of_interest": state["company_of_interest"],
            }

            result = {}
            for _ in range(10):  # safety limit on tool rounds
                # Analyst functions are synchronous; run them in a worker
                # thread so all analysts can overlap on the event loop.
                result = await asyncio.to_thread(fn, local_state)
                # NOTE(review): assumes each analyst invocation returns exactly
                # one new AI message at result["messages"][0] — confirm against
                # the analyst node implementations.
                ai_msg = result["messages"][0]
                local_state["messages"] = local_state["messages"] + [ai_msg]

                if not ai_msg.tool_calls:
                    break

                # Process tool calls and append the tool outputs so the next
                # analyst invocation sees them.
                tool_result = await asyncio.to_thread(tn.invoke, local_state)
                local_state["messages"] = (
                    local_state["messages"] + tool_result["messages"]
                )

            # Return only report fields (not messages); messages are merged
            # separately below via RemoveMessage ops.
            return {k: v for k, v in result.items() if k != "messages"}

        # Run all analysts concurrently; unknown analyst types are skipped.
        tasks = [run_single(at) for at in selected_analysts if at in analyst_fns]
        results = await asyncio.gather(*tasks)

        # Merge all report fields (later analysts win on key collisions).
        merged = {}
        for r in results:
            merged.update(r)

        # Clear messages and add placeholder (same as Msg Clear nodes):
        # RemoveMessage ops delete the accumulated history from shared state.
        messages = state.get("messages", [])
        removal_ops = [
            RemoveMessage(id=m.id)
            for m in messages
            if hasattr(m, "id") and m.id
        ]
        merged["messages"] = removal_ops + [HumanMessage(content="Continue")]

        return merged

    return parallel_analysts_node
|
||||
|
||||
|
||||
def _snapshot_research_state(state):
|
||||
"""Extract research-relevant fields into a plain dict."""
|
||||
return {
|
||||
"investment_debate_state": dict(state.get("investment_debate_state", {})),
|
||||
"market_report": state.get("market_report", ""),
|
||||
"sentiment_report": state.get("sentiment_report", ""),
|
||||
"news_report": state.get("news_report", ""),
|
||||
"fundamentals_report": state.get("fundamentals_report", ""),
|
||||
}
|
||||
|
||||
|
||||
def _snapshot_risk_state(state):
|
||||
"""Extract risk-relevant fields into a plain dict."""
|
||||
return {
|
||||
"risk_debate_state": dict(state.get("risk_debate_state", {})),
|
||||
"market_report": state.get("market_report", ""),
|
||||
"sentiment_report": state.get("sentiment_report", ""),
|
||||
"news_report": state.get("news_report", ""),
|
||||
"fundamentals_report": state.get("fundamentals_report", ""),
|
||||
"trader_investment_plan": state.get("trader_investment_plan", ""),
|
||||
}
|
||||
|
||||
|
||||
def create_parallel_research_node(bull_fn, bear_fn):
    """Create a node that runs Bull and Bear researchers in parallel.

    Uses async + asyncio.to_thread + asyncio.gather — the same pattern
    that works for create_parallel_analyst_node.

    Args:
        bull_fn: synchronous Bull researcher node function.
        bear_fn: synchronous Bear researcher node function.

    Returns:
        An async LangGraph node that merges both researchers' output into a
        single ``investment_debate_state`` update.
    """

    async def parallel_research_node(state):
        import time

        state_snap = _snapshot_research_state(state)
        t0 = time.time()

        def _timed(label, fn):
            # Fix: route timing output through the module logger (lazy
            # %-formatting) instead of raw stderr prints, so it honors the
            # application's logging configuration and can be silenced.
            async def runner():
                logger.info("[PARALLEL] %s starting at +%.1fs", label, time.time() - t0)
                result = await asyncio.to_thread(fn, state_snap)
                logger.info("[PARALLEL] %s done at +%.1fs", label, time.time() - t0)
                return result

            return runner()

        bull_result, bear_result = await asyncio.gather(
            _timed("Bull", bull_fn), _timed("Bear", bear_fn)
        )

        logger.info("[PARALLEL] Research total: %.1fs", time.time() - t0)

        bull_debate = bull_result["investment_debate_state"]
        bear_debate = bear_result["investment_debate_state"]

        # Merge: each side contributes its own history; the combined
        # transcript is bull first, then bear. count=2 marks both opening
        # turns as taken.
        merged_debate = {
            "bull_history": bull_debate.get("bull_history", ""),
            "bear_history": bear_debate.get("bear_history", ""),
            "history": bull_debate.get("bull_history", "")
            + "\n"
            + bear_debate.get("bear_history", ""),
            "current_response": bear_debate.get("current_response", ""),
            "judge_decision": "",
            "count": 2,
        }
        return {"investment_debate_state": merged_debate}

    return parallel_research_node
|
||||
|
||||
|
||||
def create_parallel_risk_node(aggressive_fn, conservative_fn, neutral_fn):
    """Create a node that runs all 3 risk analysts in parallel.

    Uses async + asyncio.to_thread + asyncio.gather — the same pattern
    that works for create_parallel_analyst_node.

    Args:
        aggressive_fn: synchronous Aggressive risk analyst node function.
        conservative_fn: synchronous Conservative risk analyst node function.
        neutral_fn: synchronous Neutral risk analyst node function.

    Returns:
        An async LangGraph node that merges all three analysts' output into
        a single ``risk_debate_state`` update.
    """

    async def parallel_risk_node(state):
        import time

        state_snap = _snapshot_risk_state(state)
        t0 = time.time()

        def _timed(label, fn):
            # Fix: route timing output through the module logger (lazy
            # %-formatting) instead of raw stderr prints, so it honors the
            # application's logging configuration and can be silenced.
            async def runner():
                logger.info("[PARALLEL] %s starting at +%.1fs", label, time.time() - t0)
                result = await asyncio.to_thread(fn, state_snap)
                logger.info("[PARALLEL] %s done at +%.1fs", label, time.time() - t0)
                return result

            return runner()

        agg_result, con_result, neu_result = await asyncio.gather(
            _timed("Aggressive", aggressive_fn),
            _timed("Conservative", conservative_fn),
            _timed("Neutral", neutral_fn),
        )

        logger.info("[PARALLEL] Risk total: %.1fs", time.time() - t0)

        agg_debate = agg_result["risk_debate_state"]
        con_debate = con_result["risk_debate_state"]
        neu_debate = neu_result["risk_debate_state"]

        # Merge: each analyst contributes its own history; the combined
        # transcript is aggressive, then conservative, then neutral.
        # count=3 marks all three opening turns as taken, and "Neutral" is
        # recorded as last speaker so any follow-up rotation resumes with
        # the Aggressive analyst.
        merged_debate = {
            "aggressive_history": agg_debate.get("aggressive_history", ""),
            "conservative_history": con_debate.get("conservative_history", ""),
            "neutral_history": neu_debate.get("neutral_history", ""),
            "history": agg_debate.get("aggressive_history", "")
            + "\n"
            + con_debate.get("conservative_history", "")
            + "\n"
            + neu_debate.get("neutral_history", ""),
            "latest_speaker": "Neutral",
            "current_aggressive_response": agg_debate.get(
                "current_aggressive_response", ""
            ),
            "current_conservative_response": con_debate.get(
                "current_conservative_response", ""
            ),
            "current_neutral_response": neu_debate.get(
                "current_neutral_response", ""
            ),
            "judge_decision": "",
            "count": 3,
        }
        return {"risk_debate_state": merged_debate}

    return parallel_risk_node
|
||||
"""Parallel execution nodes for TradingAgents.
|
||||
|
||||
Provides parallel wrappers for:
|
||||
- Analyst phase (Market, Social, News, Fundamentals)
|
||||
- Research debate phase (Bull + Bear)
|
||||
- Risk debate phase (Aggressive + Conservative + Neutral)
|
||||
"""
|
||||
|
||||
import asyncio
|
||||
import logging
|
||||
|
||||
from langchain_core.messages import HumanMessage, RemoveMessage
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def create_parallel_analyst_node(analyst_fns, tool_nodes, selected_analysts):
    """Create a single LangGraph node that runs all analysts in parallel.

    Each analyst gets its own isolated message state and runs its complete
    tool-calling loop independently. Results are merged at the end.

    Args:
        analyst_fns: dict mapping analyst type (e.g. "market") to node function
        tool_nodes: dict mapping analyst type to ToolNode instance
        selected_analysts: list of analyst types to run
    """

    async def parallel_analysts_node(state):
        """Run all analysts concurrently and merge their reports."""

        async def run_single(analyst_type):
            """Run one analyst through its complete tool-calling loop."""
            fn = analyst_fns[analyst_type]
            tn = tool_nodes[analyst_type]

            # Each analyst gets its own isolated message state so concurrent
            # analysts cannot see each other's tool traffic.
            local_state = {
                "messages": list(state["messages"]),
                "trade_date": state["trade_date"],
                "company_of_interest": state["company_of_interest"],
            }

            result = {}
            for _ in range(10):  # safety limit on tool rounds
                # Analyst functions are synchronous; run them in a worker
                # thread so all analysts can overlap on the event loop.
                result = await asyncio.to_thread(fn, local_state)
                # NOTE(review): assumes each analyst invocation returns exactly
                # one new AI message at result["messages"][0] — confirm against
                # the analyst node implementations.
                ai_msg = result["messages"][0]
                local_state["messages"] = local_state["messages"] + [ai_msg]

                if not ai_msg.tool_calls:
                    break

                # Process tool calls and append the tool outputs so the next
                # analyst invocation sees them.
                tool_result = await asyncio.to_thread(tn.invoke, local_state)
                local_state["messages"] = (
                    local_state["messages"] + tool_result["messages"]
                )

            # Return only report fields (not messages); messages are merged
            # separately below via RemoveMessage ops.
            return {k: v for k, v in result.items() if k != "messages"}

        # Run all analysts concurrently; unknown analyst types are skipped.
        tasks = [run_single(at) for at in selected_analysts if at in analyst_fns]
        results = await asyncio.gather(*tasks)

        # Merge all report fields (later analysts win on key collisions).
        merged = {}
        for r in results:
            merged.update(r)

        # Clear messages and add placeholder (same as Msg Clear nodes):
        # RemoveMessage ops delete the accumulated history from shared state.
        messages = state.get("messages", [])
        removal_ops = [
            RemoveMessage(id=m.id)
            for m in messages
            if hasattr(m, "id") and m.id
        ]
        merged["messages"] = removal_ops + [HumanMessage(content="Continue")]

        return merged

    return parallel_analysts_node
|
||||
|
||||
|
||||
def _snapshot_research_state(state):
|
||||
"""Extract research-relevant fields into a plain dict."""
|
||||
return {
|
||||
"investment_debate_state": dict(state.get("investment_debate_state", {})),
|
||||
"market_report": state.get("market_report", ""),
|
||||
"sentiment_report": state.get("sentiment_report", ""),
|
||||
"news_report": state.get("news_report", ""),
|
||||
"fundamentals_report": state.get("fundamentals_report", ""),
|
||||
}
|
||||
|
||||
|
||||
def _snapshot_risk_state(state):
|
||||
"""Extract risk-relevant fields into a plain dict."""
|
||||
return {
|
||||
"risk_debate_state": dict(state.get("risk_debate_state", {})),
|
||||
"market_report": state.get("market_report", ""),
|
||||
"sentiment_report": state.get("sentiment_report", ""),
|
||||
"news_report": state.get("news_report", ""),
|
||||
"fundamentals_report": state.get("fundamentals_report", ""),
|
||||
"trader_investment_plan": state.get("trader_investment_plan", ""),
|
||||
}
|
||||
|
||||
|
||||
def create_parallel_research_node(bull_fn, bear_fn):
    """Create a node that runs Bull and Bear researchers in parallel.

    Uses async + asyncio.to_thread + asyncio.gather — the same pattern
    that works for create_parallel_analyst_node.

    Args:
        bull_fn: synchronous Bull researcher node function.
        bear_fn: synchronous Bear researcher node function.

    Returns:
        An async LangGraph node that merges both researchers' output into a
        single ``investment_debate_state`` update.
    """

    async def parallel_research_node(state):
        import time

        state_snap = _snapshot_research_state(state)
        t0 = time.time()

        def _timed(label, fn):
            # Fix: route timing output through the module logger (lazy
            # %-formatting) instead of raw stderr prints, so it honors the
            # application's logging configuration and can be silenced.
            async def runner():
                logger.info("[PARALLEL] %s starting at +%.1fs", label, time.time() - t0)
                result = await asyncio.to_thread(fn, state_snap)
                logger.info("[PARALLEL] %s done at +%.1fs", label, time.time() - t0)
                return result

            return runner()

        bull_result, bear_result = await asyncio.gather(
            _timed("Bull", bull_fn), _timed("Bear", bear_fn)
        )

        logger.info("[PARALLEL] Research total: %.1fs", time.time() - t0)

        bull_debate = bull_result["investment_debate_state"]
        bear_debate = bear_result["investment_debate_state"]

        # Merge: each side contributes its own history; the combined
        # transcript is bull first, then bear. count=2 marks both opening
        # turns as taken.
        merged_debate = {
            "bull_history": bull_debate.get("bull_history", ""),
            "bear_history": bear_debate.get("bear_history", ""),
            "history": bull_debate.get("bull_history", "")
            + "\n"
            + bear_debate.get("bear_history", ""),
            "current_response": bear_debate.get("current_response", ""),
            "judge_decision": "",
            "count": 2,
        }
        return {"investment_debate_state": merged_debate}

    return parallel_research_node
|
||||
|
||||
|
||||
def create_parallel_risk_node(aggressive_fn, conservative_fn, neutral_fn):
    """Create a node that runs all 3 risk analysts in parallel.

    Uses async + asyncio.to_thread + asyncio.gather — the same pattern
    that works for create_parallel_analyst_node.

    Args:
        aggressive_fn: synchronous Aggressive risk analyst node function.
        conservative_fn: synchronous Conservative risk analyst node function.
        neutral_fn: synchronous Neutral risk analyst node function.

    Returns:
        An async LangGraph node that merges all three analysts' output into
        a single ``risk_debate_state`` update.
    """

    async def parallel_risk_node(state):
        import time

        state_snap = _snapshot_risk_state(state)
        t0 = time.time()

        def _timed(label, fn):
            # Fix: route timing output through the module logger (lazy
            # %-formatting) instead of raw stderr prints, so it honors the
            # application's logging configuration and can be silenced.
            async def runner():
                logger.info("[PARALLEL] %s starting at +%.1fs", label, time.time() - t0)
                result = await asyncio.to_thread(fn, state_snap)
                logger.info("[PARALLEL] %s done at +%.1fs", label, time.time() - t0)
                return result

            return runner()

        agg_result, con_result, neu_result = await asyncio.gather(
            _timed("Aggressive", aggressive_fn),
            _timed("Conservative", conservative_fn),
            _timed("Neutral", neutral_fn),
        )

        logger.info("[PARALLEL] Risk total: %.1fs", time.time() - t0)

        agg_debate = agg_result["risk_debate_state"]
        con_debate = con_result["risk_debate_state"]
        neu_debate = neu_result["risk_debate_state"]

        # Merge: each analyst contributes its own history; the combined
        # transcript is aggressive, then conservative, then neutral.
        # count=3 marks all three opening turns as taken, and "Neutral" is
        # recorded as last speaker so any follow-up rotation resumes with
        # the Aggressive analyst.
        merged_debate = {
            "aggressive_history": agg_debate.get("aggressive_history", ""),
            "conservative_history": con_debate.get("conservative_history", ""),
            "neutral_history": neu_debate.get("neutral_history", ""),
            "history": agg_debate.get("aggressive_history", "")
            + "\n"
            + con_debate.get("conservative_history", "")
            + "\n"
            + neu_debate.get("neutral_history", ""),
            "latest_speaker": "Neutral",
            "current_aggressive_response": agg_debate.get(
                "current_aggressive_response", ""
            ),
            "current_conservative_response": con_debate.get(
                "current_conservative_response", ""
            ),
            "current_neutral_response": neu_debate.get(
                "current_neutral_response", ""
            ),
            "judge_decision": "",
            "count": 3,
        }
        return {"risk_debate_state": merged_debate}

    return parallel_risk_node
|
||||
|
|
|
|||
|
|
@ -1,57 +1,57 @@
|
|||
# TradingAgents/graph/propagation.py
|
||||
|
||||
from typing import Dict, Any, List, Optional
|
||||
from tradingagents.agents.utils.agent_states import (
|
||||
AgentState,
|
||||
InvestDebateState,
|
||||
RiskDebateState,
|
||||
)
|
||||
|
||||
|
||||
class Propagator:
    """Handles state initialization and propagation through the graph."""

    def __init__(self, max_recur_limit=100):
        """Initialize with configuration parameters."""
        self.max_recur_limit = max_recur_limit

    def create_initial_state(
        self, company_name: str, trade_date: str
    ) -> Dict[str, Any]:
        """Create the initial state for the agent graph."""
        # Both debate sub-states start empty with their counters at zero.
        invest_state = InvestDebateState(
            {"history": "", "current_response": "", "count": 0}
        )
        risk_state = RiskDebateState(
            {
                "history": "",
                "current_aggressive_response": "",
                "current_conservative_response": "",
                "current_neutral_response": "",
                "count": 0,
            }
        )
        return {
            "messages": [("human", company_name)],
            "company_of_interest": company_name,
            "trade_date": str(trade_date),
            "investment_debate_state": invest_state,
            "risk_debate_state": risk_state,
            # Analyst reports start empty and are filled in by the graph.
            "market_report": "",
            "fundamentals_report": "",
            "sentiment_report": "",
            "news_report": "",
        }

    def get_graph_args(self, callbacks: Optional[List] = None) -> Dict[str, Any]:
        """Get arguments for the graph invocation.

        Args:
            callbacks: Optional list of callback handlers for tool execution
                tracking. Note: LLM callbacks are handled separately via the
                LLM constructor.
        """
        invoke_config: Dict[str, Any] = {"recursion_limit": self.max_recur_limit}
        if callbacks:
            invoke_config["callbacks"] = callbacks
        return {"stream_mode": "values", "config": invoke_config}
|
||||
# TradingAgents/graph/propagation.py
|
||||
|
||||
from typing import Dict, Any, List, Optional
|
||||
from tradingagents.agents.utils.agent_states import (
|
||||
AgentState,
|
||||
InvestDebateState,
|
||||
RiskDebateState,
|
||||
)
|
||||
|
||||
|
||||
class Propagator:
    """Builds the initial pipeline state and the invocation kwargs for the graph."""

    def __init__(self, max_recur_limit=100):
        """Store the recursion ceiling applied to every graph run."""
        self.max_recur_limit = max_recur_limit

    def create_initial_state(
        self, company_name: str, trade_date: str
    ) -> Dict[str, Any]:
        """Create the initial state for the agent graph.

        Args:
            company_name: Ticker/company the run analyzes.
            trade_date: Trading date for the run (coerced to ``str``).
        """
        # Both debate sub-states start empty with a zero turn counter.
        empty_invest_debate = InvestDebateState(
            {"history": "", "current_response": "", "count": 0}
        )
        empty_risk_debate = RiskDebateState(
            {
                "history": "",
                "current_aggressive_response": "",
                "current_conservative_response": "",
                "current_neutral_response": "",
                "count": 0,
            }
        )
        initial: Dict[str, Any] = {
            "messages": [("human", company_name)],
            "company_of_interest": company_name,
            "trade_date": str(trade_date),
            "investment_debate_state": empty_invest_debate,
            "risk_debate_state": empty_risk_debate,
        }
        # Analyst reports start blank; graph nodes fill them in as they run.
        for report_key in (
            "market_report",
            "fundamentals_report",
            "sentiment_report",
            "news_report",
        ):
            initial[report_key] = ""
        return initial

    def get_graph_args(self, callbacks: Optional[List] = None) -> Dict[str, Any]:
        """Get arguments for the graph invocation.

        Args:
            callbacks: Optional list of callback handlers for tool execution
                tracking. Note: LLM callbacks are handled separately via the
                LLM constructor.
        """
        run_config: Dict[str, Any] = {"recursion_limit": self.max_recur_limit}
        if callbacks:
            run_config["callbacks"] = callbacks
        return {"stream_mode": "values", "config": run_config}
|
||||
|
|
|
|||
|
|
@ -1,121 +1,121 @@
|
|||
# TradingAgents/graph/reflection.py
|
||||
|
||||
from typing import Dict, Any
|
||||
from langchain_openai import ChatOpenAI
|
||||
|
||||
|
||||
class Reflector:
    """Handles reflection on decisions and updating memory.

    Each ``reflect_*`` method extracts the current market situation from the
    pipeline state, asks the LLM to critique one component's decision against
    realized returns, and stores the (situation, lesson) pair in that
    component's memory object.
    """

    # State keys concatenated into the "objective market situation" context.
    _REPORT_KEYS = (
        "market_report",
        "sentiment_report",
        "news_report",
        "fundamentals_report",
    )

    def __init__(self, quick_thinking_llm: "ChatOpenAI"):
        """Initialize the reflector with an LLM.

        Args:
            quick_thinking_llm: Chat model used to generate reflections; only
                ``invoke(messages).content`` is required of it.
        """
        self.quick_thinking_llm = quick_thinking_llm
        self.reflection_system_prompt = self._get_reflection_prompt()

    def _get_reflection_prompt(self) -> str:
        """Get the system prompt for reflection."""
        return """
You are an expert financial analyst tasked with reviewing trading decisions/analysis and providing a comprehensive, step-by-step analysis.
Your goal is to deliver detailed insights into investment decisions and highlight opportunities for improvement, adhering strictly to the following guidelines:

1. Reasoning:
- For each trading decision, determine whether it was correct or incorrect. A correct decision results in an increase in returns, while an incorrect decision does the opposite.
- Analyze the contributing factors to each success or mistake. Consider:
- Market intelligence.
- Technical indicators.
- Technical signals.
- Price movement analysis.
- Overall market data analysis
- News analysis.
- Social media and sentiment analysis.
- Fundamental data analysis.
- Weight the importance of each factor in the decision-making process.

2. Improvement:
- For any incorrect decisions, propose revisions to maximize returns.
- Provide a detailed list of corrective actions or improvements, including specific recommendations (e.g., changing a decision from HOLD to BUY on a particular date).

3. Summary:
- Summarize the lessons learned from the successes and mistakes.
- Highlight how these lessons can be adapted for future trading scenarios and draw connections between similar situations to apply the knowledge gained.

4. Query:
- Extract key insights from the summary into a concise sentence of no more than 1000 tokens.
- Ensure the condensed sentence captures the essence of the lessons and reasoning for easy reference.

Adhere strictly to these instructions, and ensure your output is detailed, accurate, and actionable. You will also be given objective descriptions of the market from a price movements, technical indicator, news, and sentiment perspective to provide more context for your analysis.
"""

    def _extract_current_situation(self, current_state: Dict[str, Any]) -> str:
        """Concatenate the four analyst reports into one situation string.

        Missing report keys are treated as empty strings instead of raising
        ``KeyError``, so partially-populated states can still be reflected on.
        """
        return "\n\n".join(current_state.get(key, "") for key in self._REPORT_KEYS)

    def _reflect_on_component(
        self, component_type: str, report: str, situation: str, returns_losses
    ) -> str:
        """Generate reflection text for a single component.

        Args:
            component_type: Label for the component (e.g. "BULL"); currently
                informational only and not embedded in the prompt.
            report: The component's analysis/decision text to critique.
            situation: Objective market reports used as reference context.
            returns_losses: Realized returns/losses used to judge the decision.
        """
        messages = [
            ("system", self.reflection_system_prompt),
            (
                "human",
                f"Returns: {returns_losses}\n\nAnalysis/Decision: {report}\n\nObjective Market Reports for Reference: {situation}",
            ),
        ]
        return self.quick_thinking_llm.invoke(messages).content

    def _reflect_and_remember(
        self, component_type: str, report: str, current_state, returns_losses, memory
    ) -> None:
        """Shared flow for all reflect_* methods: reflect, then persist."""
        situation = self._extract_current_situation(current_state)
        result = self._reflect_on_component(
            component_type, report, situation, returns_losses
        )
        memory.add_situations([(situation, result)])

    def reflect_bull_researcher(self, current_state, returns_losses, bull_memory):
        """Reflect on bull researcher's analysis and update memory."""
        self._reflect_and_remember(
            "BULL",
            current_state["investment_debate_state"]["bull_history"],
            current_state,
            returns_losses,
            bull_memory,
        )

    def reflect_bear_researcher(self, current_state, returns_losses, bear_memory):
        """Reflect on bear researcher's analysis and update memory."""
        self._reflect_and_remember(
            "BEAR",
            current_state["investment_debate_state"]["bear_history"],
            current_state,
            returns_losses,
            bear_memory,
        )

    def reflect_trader(self, current_state, returns_losses, trader_memory):
        """Reflect on trader's decision and update memory."""
        self._reflect_and_remember(
            "TRADER",
            current_state["trader_investment_plan"],
            current_state,
            returns_losses,
            trader_memory,
        )

    def reflect_invest_judge(self, current_state, returns_losses, invest_judge_memory):
        """Reflect on investment judge's decision and update memory."""
        self._reflect_and_remember(
            "INVEST JUDGE",
            current_state["investment_debate_state"]["judge_decision"],
            current_state,
            returns_losses,
            invest_judge_memory,
        )

    def reflect_risk_manager(self, current_state, returns_losses, risk_manager_memory):
        """Reflect on risk manager's decision and update memory."""
        self._reflect_and_remember(
            "RISK JUDGE",
            current_state["risk_debate_state"]["judge_decision"],
            current_state,
            returns_losses,
            risk_manager_memory,
        )
|
||||
# TradingAgents/graph/reflection.py
|
||||
|
||||
from typing import Dict, Any
|
||||
from langchain_openai import ChatOpenAI
|
||||
|
||||
|
||||
class Reflector:
    """Handles reflection on decisions and updating memory.

    Each ``reflect_*`` method extracts the current market situation from the
    pipeline state, asks the LLM to critique one component's decision against
    realized returns, and stores the (situation, lesson) pair in that
    component's memory object.
    """

    # State keys concatenated into the "objective market situation" context.
    _REPORT_KEYS = (
        "market_report",
        "sentiment_report",
        "news_report",
        "fundamentals_report",
    )

    def __init__(self, quick_thinking_llm: "ChatOpenAI"):
        """Initialize the reflector with an LLM.

        Args:
            quick_thinking_llm: Chat model used to generate reflections; only
                ``invoke(messages).content`` is required of it.
        """
        self.quick_thinking_llm = quick_thinking_llm
        self.reflection_system_prompt = self._get_reflection_prompt()

    def _get_reflection_prompt(self) -> str:
        """Get the system prompt for reflection."""
        return """
You are an expert financial analyst tasked with reviewing trading decisions/analysis and providing a comprehensive, step-by-step analysis.
Your goal is to deliver detailed insights into investment decisions and highlight opportunities for improvement, adhering strictly to the following guidelines:

1. Reasoning:
- For each trading decision, determine whether it was correct or incorrect. A correct decision results in an increase in returns, while an incorrect decision does the opposite.
- Analyze the contributing factors to each success or mistake. Consider:
- Market intelligence.
- Technical indicators.
- Technical signals.
- Price movement analysis.
- Overall market data analysis
- News analysis.
- Social media and sentiment analysis.
- Fundamental data analysis.
- Weight the importance of each factor in the decision-making process.

2. Improvement:
- For any incorrect decisions, propose revisions to maximize returns.
- Provide a detailed list of corrective actions or improvements, including specific recommendations (e.g., changing a decision from HOLD to BUY on a particular date).

3. Summary:
- Summarize the lessons learned from the successes and mistakes.
- Highlight how these lessons can be adapted for future trading scenarios and draw connections between similar situations to apply the knowledge gained.

4. Query:
- Extract key insights from the summary into a concise sentence of no more than 1000 tokens.
- Ensure the condensed sentence captures the essence of the lessons and reasoning for easy reference.

Adhere strictly to these instructions, and ensure your output is detailed, accurate, and actionable. You will also be given objective descriptions of the market from a price movements, technical indicator, news, and sentiment perspective to provide more context for your analysis.
"""

    def _extract_current_situation(self, current_state: Dict[str, Any]) -> str:
        """Concatenate the four analyst reports into one situation string.

        Missing report keys are treated as empty strings instead of raising
        ``KeyError``, so partially-populated states can still be reflected on.
        """
        return "\n\n".join(current_state.get(key, "") for key in self._REPORT_KEYS)

    def _reflect_on_component(
        self, component_type: str, report: str, situation: str, returns_losses
    ) -> str:
        """Generate reflection text for a single component.

        Args:
            component_type: Label for the component (e.g. "BULL"); currently
                informational only and not embedded in the prompt.
            report: The component's analysis/decision text to critique.
            situation: Objective market reports used as reference context.
            returns_losses: Realized returns/losses used to judge the decision.
        """
        messages = [
            ("system", self.reflection_system_prompt),
            (
                "human",
                f"Returns: {returns_losses}\n\nAnalysis/Decision: {report}\n\nObjective Market Reports for Reference: {situation}",
            ),
        ]
        return self.quick_thinking_llm.invoke(messages).content

    def _reflect_and_remember(
        self, component_type: str, report: str, current_state, returns_losses, memory
    ) -> None:
        """Shared flow for all reflect_* methods: reflect, then persist."""
        situation = self._extract_current_situation(current_state)
        result = self._reflect_on_component(
            component_type, report, situation, returns_losses
        )
        memory.add_situations([(situation, result)])

    def reflect_bull_researcher(self, current_state, returns_losses, bull_memory):
        """Reflect on bull researcher's analysis and update memory."""
        self._reflect_and_remember(
            "BULL",
            current_state["investment_debate_state"]["bull_history"],
            current_state,
            returns_losses,
            bull_memory,
        )

    def reflect_bear_researcher(self, current_state, returns_losses, bear_memory):
        """Reflect on bear researcher's analysis and update memory."""
        self._reflect_and_remember(
            "BEAR",
            current_state["investment_debate_state"]["bear_history"],
            current_state,
            returns_losses,
            bear_memory,
        )

    def reflect_trader(self, current_state, returns_losses, trader_memory):
        """Reflect on trader's decision and update memory."""
        self._reflect_and_remember(
            "TRADER",
            current_state["trader_investment_plan"],
            current_state,
            returns_losses,
            trader_memory,
        )

    def reflect_invest_judge(self, current_state, returns_losses, invest_judge_memory):
        """Reflect on investment judge's decision and update memory."""
        self._reflect_and_remember(
            "INVEST JUDGE",
            current_state["investment_debate_state"]["judge_decision"],
            current_state,
            returns_losses,
            invest_judge_memory,
        )

    def reflect_risk_manager(self, current_state, returns_losses, risk_manager_memory):
        """Reflect on risk manager's decision and update memory."""
        self._reflect_and_remember(
            "RISK JUDGE",
            current_state["risk_debate_state"]["judge_decision"],
            current_state,
            returns_losses,
            risk_manager_memory,
        )
|
||||
|
|
|
|||
|
|
@ -1,208 +1,208 @@
|
|||
"""Graph setup for the structured equity ranking pipeline.
|
||||
|
||||
Pipeline stages:
|
||||
START → Validation → [veto gate] → Tier 1 (Macro+Liquidity parallel)
|
||||
→ Tier 2 (8 agents parallel) → Scoring (Archetype+MasterScore)
|
||||
→ Tier 3 (Bull+Bear parallel → Debate → Risk → FinalDecision)
|
||||
→ END
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import asyncio
|
||||
import logging
|
||||
import time
|
||||
from typing import Any, Dict, List
|
||||
|
||||
from langgraph.graph import END, START, StateGraph
|
||||
|
||||
from tradingagents.agents.utils.agent_states import PipelineState
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class StructuredGraphSetup:
    """Builds the structured equity ranking LangGraph."""

    def __init__(self, quick_llm, deep_llm):
        # quick_llm: cheap model for tier 1/2 analysis nodes.
        # deep_llm: stronger model for portfolio analysis and tier-3 debate.
        self.quick_llm = quick_llm
        self.deep_llm = deep_llm

    def setup_graph(self):
        """Build and compile the structured pipeline graph."""
        from tradingagents.agents.structured import (
            create_archetype_node,
            create_backlog_node,
            create_bear_case_node,
            create_bull_case_node,
            create_business_quality_node,
            create_crowding_node,
            create_debate_node,
            create_earnings_revisions_node,
            create_entry_timing_node,
            create_final_decision_node,
            create_institutional_flow_node,
            create_liquidity_node,
            create_macro_node,
            create_position_replacement_node,
            create_risk_node,
            create_scoring_node,
            create_sector_rotation_node,
            create_theme_substitution_node,
            create_validation_node,
            create_valuation_node,
        )

        # Tier 1: validation needs no LLM; macro/liquidity run on the cheap model.
        validation_fn = create_validation_node()
        tier1_node = _create_parallel_node(
            [
                ("macro", create_macro_node(self.quick_llm)),
                ("liquidity", create_liquidity_node(self.quick_llm)),
            ],
            "Tier 1",
        )

        # Tier 2: eight analysis agents on the cheap model, fanned out in parallel.
        tier2_node = _create_parallel_node(
            [
                (name, factory(self.quick_llm))
                for name, factory in (
                    ("business_quality", create_business_quality_node),
                    ("institutional_flow", create_institutional_flow_node),
                    ("valuation", create_valuation_node),
                    ("entry_timing", create_entry_timing_node),
                    ("earnings_revisions", create_earnings_revisions_node),
                    ("sector_rotation", create_sector_rotation_node),
                    ("backlog", create_backlog_node),
                    ("crowding", create_crowding_node),
                )
            ],
            "Tier 2",
        )

        # Tier 3 debate: bull and bear cases run in parallel on the deep model.
        bull_bear_node = _create_parallel_node(
            [
                ("bull_case", create_bull_case_node(self.deep_llm)),
                ("bear_case", create_bear_case_node(self.deep_llm)),
            ],
            "Bull/Bear",
        )
        debate_fn = create_debate_node(self.deep_llm)

        def _chain(first_fn, second_fn):
            """Compose two nodes sequentially: the second sees the first's output."""

            def chained(state):
                first_delta = first_fn(state)
                second_delta = second_fn({**state, **first_delta})
                return {**first_delta, **second_delta}

            return chained

        # Archetype classification feeds the master-score computation.
        scoring_node = _chain(
            create_archetype_node(self.quick_llm), create_scoring_node()
        )
        # Theme substitution feeds position-replacement analysis (deep model).
        portfolio_node = _chain(
            create_theme_substitution_node(self.deep_llm),
            create_position_replacement_node(self.deep_llm),
        )
        # Risk assessment feeds the final decision (deep model).
        decision_node = _chain(
            create_risk_node(self.deep_llm),
            create_final_decision_node(self.deep_llm),
        )

        # Assemble the graph: nodes first, then edges.
        workflow = StateGraph(PipelineState)
        for node_name, node_fn in (
            ("Validation", validation_fn),
            ("Tier 1 Analysis", tier1_node),
            ("Tier 2 Analysis", tier2_node),
            ("Scoring", scoring_node),
            ("Portfolio Analysis", portfolio_node),
            ("Debate", bull_bear_node),
            ("Debate Referee", debate_fn),
            ("Decision", decision_node),
        ):
            workflow.add_node(node_name, node_fn)

        workflow.add_edge(START, "Validation")
        # A hard veto from validation short-circuits the whole pipeline.
        workflow.add_conditional_edges(
            "Validation",
            _veto_gate,
            {END: END, "continue": "Tier 1 Analysis"},
        )
        for src, dst in (
            ("Tier 1 Analysis", "Tier 2 Analysis"),
            ("Tier 2 Analysis", "Scoring"),
            ("Scoring", "Portfolio Analysis"),
            ("Portfolio Analysis", "Debate"),
            ("Debate", "Debate Referee"),
            ("Debate Referee", "Decision"),
        ):
            workflow.add_edge(src, dst)
        workflow.add_edge("Decision", END)

        return workflow.compile()
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Helpers
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
def _veto_gate(state: Dict[str, Any]) -> str:
|
||||
"""Check if validation resulted in a hard veto."""
|
||||
if state.get("hard_veto"):
|
||||
return END
|
||||
validation = state.get("validation") or {}
|
||||
if validation.get("veto"):
|
||||
return END
|
||||
return "continue"
|
||||
|
||||
|
||||
def _create_parallel_node(agent_fns: List[tuple], label: str):
|
||||
"""Create an async node that runs multiple agent functions in parallel.
|
||||
|
||||
Args:
|
||||
agent_fns: List of (name, fn) tuples.
|
||||
label: Label for logging.
|
||||
"""
|
||||
|
||||
async def parallel_node(state):
|
||||
t0 = time.time()
|
||||
|
||||
async def run_one(name, fn):
|
||||
logger.debug("[%s] %s starting", label, name)
|
||||
result = await asyncio.to_thread(fn, state)
|
||||
logger.debug("[%s] %s done (%.1fs)", label, name, time.time() - t0)
|
||||
return result
|
||||
|
||||
tasks = [run_one(name, fn) for name, fn in agent_fns]
|
||||
results = await asyncio.gather(*tasks, return_exceptions=True)
|
||||
|
||||
merged: Dict[str, Any] = {}
|
||||
all_flags: list = []
|
||||
for (name, _), result in zip(agent_fns, results):
|
||||
if isinstance(result, Exception):
|
||||
logger.error("[%s] %s failed: %s", label, name, result)
|
||||
continue
|
||||
flags = result.pop("global_flags", [])
|
||||
all_flags.extend(flags)
|
||||
merged.update(result)
|
||||
if all_flags:
|
||||
merged["global_flags"] = all_flags
|
||||
|
||||
logger.info("[%s] completed in %.1fs", label, time.time() - t0)
|
||||
return merged
|
||||
|
||||
return parallel_node
|
||||
"""Graph setup for the structured equity ranking pipeline.
|
||||
|
||||
Pipeline stages:
|
||||
START → Validation → [veto gate] → Tier 1 (Macro+Liquidity parallel)
|
||||
→ Tier 2 (8 agents parallel) → Scoring (Archetype+MasterScore)
|
||||
→ Tier 3 (Bull+Bear parallel → Debate → Risk → FinalDecision)
|
||||
→ END
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import asyncio
|
||||
import logging
|
||||
import time
|
||||
from typing import Any, Dict, List
|
||||
|
||||
from langgraph.graph import END, START, StateGraph
|
||||
|
||||
from tradingagents.agents.utils.agent_states import PipelineState
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class StructuredGraphSetup:
    """Builds the structured equity ranking LangGraph."""

    def __init__(self, quick_llm, deep_llm):
        # quick_llm: cheap model for tier 1/2 analysis nodes.
        # deep_llm: stronger model for portfolio analysis and tier-3 debate.
        self.quick_llm = quick_llm
        self.deep_llm = deep_llm

    def setup_graph(self):
        """Build and compile the structured pipeline graph."""
        from tradingagents.agents.structured import (
            create_archetype_node,
            create_backlog_node,
            create_bear_case_node,
            create_bull_case_node,
            create_business_quality_node,
            create_crowding_node,
            create_debate_node,
            create_earnings_revisions_node,
            create_entry_timing_node,
            create_final_decision_node,
            create_institutional_flow_node,
            create_liquidity_node,
            create_macro_node,
            create_position_replacement_node,
            create_risk_node,
            create_scoring_node,
            create_sector_rotation_node,
            create_theme_substitution_node,
            create_validation_node,
            create_valuation_node,
        )

        # Tier 1: validation needs no LLM; macro/liquidity run on the cheap model.
        validation_fn = create_validation_node()
        tier1_node = _create_parallel_node(
            [
                ("macro", create_macro_node(self.quick_llm)),
                ("liquidity", create_liquidity_node(self.quick_llm)),
            ],
            "Tier 1",
        )

        # Tier 2: eight analysis agents on the cheap model, fanned out in parallel.
        tier2_node = _create_parallel_node(
            [
                (name, factory(self.quick_llm))
                for name, factory in (
                    ("business_quality", create_business_quality_node),
                    ("institutional_flow", create_institutional_flow_node),
                    ("valuation", create_valuation_node),
                    ("entry_timing", create_entry_timing_node),
                    ("earnings_revisions", create_earnings_revisions_node),
                    ("sector_rotation", create_sector_rotation_node),
                    ("backlog", create_backlog_node),
                    ("crowding", create_crowding_node),
                )
            ],
            "Tier 2",
        )

        # Tier 3 debate: bull and bear cases run in parallel on the deep model.
        bull_bear_node = _create_parallel_node(
            [
                ("bull_case", create_bull_case_node(self.deep_llm)),
                ("bear_case", create_bear_case_node(self.deep_llm)),
            ],
            "Bull/Bear",
        )
        debate_fn = create_debate_node(self.deep_llm)

        def _chain(first_fn, second_fn):
            """Compose two nodes sequentially: the second sees the first's output."""

            def chained(state):
                first_delta = first_fn(state)
                second_delta = second_fn({**state, **first_delta})
                return {**first_delta, **second_delta}

            return chained

        # Archetype classification feeds the master-score computation.
        scoring_node = _chain(
            create_archetype_node(self.quick_llm), create_scoring_node()
        )
        # Theme substitution feeds position-replacement analysis (deep model).
        portfolio_node = _chain(
            create_theme_substitution_node(self.deep_llm),
            create_position_replacement_node(self.deep_llm),
        )
        # Risk assessment feeds the final decision (deep model).
        decision_node = _chain(
            create_risk_node(self.deep_llm),
            create_final_decision_node(self.deep_llm),
        )

        # Assemble the graph: nodes first, then edges.
        workflow = StateGraph(PipelineState)
        for node_name, node_fn in (
            ("Validation", validation_fn),
            ("Tier 1 Analysis", tier1_node),
            ("Tier 2 Analysis", tier2_node),
            ("Scoring", scoring_node),
            ("Portfolio Analysis", portfolio_node),
            ("Debate", bull_bear_node),
            ("Debate Referee", debate_fn),
            ("Decision", decision_node),
        ):
            workflow.add_node(node_name, node_fn)

        workflow.add_edge(START, "Validation")
        # A hard veto from validation short-circuits the whole pipeline.
        workflow.add_conditional_edges(
            "Validation",
            _veto_gate,
            {END: END, "continue": "Tier 1 Analysis"},
        )
        for src, dst in (
            ("Tier 1 Analysis", "Tier 2 Analysis"),
            ("Tier 2 Analysis", "Scoring"),
            ("Scoring", "Portfolio Analysis"),
            ("Portfolio Analysis", "Debate"),
            ("Debate", "Debate Referee"),
            ("Debate Referee", "Decision"),
        ):
            workflow.add_edge(src, dst)
        workflow.add_edge("Decision", END)

        return workflow.compile()
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Helpers
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
def _veto_gate(state: Dict[str, Any]) -> str:
|
||||
"""Check if validation resulted in a hard veto."""
|
||||
if state.get("hard_veto"):
|
||||
return END
|
||||
validation = state.get("validation") or {}
|
||||
if validation.get("veto"):
|
||||
return END
|
||||
return "continue"
|
||||
|
||||
|
||||
def _create_parallel_node(agent_fns: List[tuple], label: str):
|
||||
"""Create an async node that runs multiple agent functions in parallel.
|
||||
|
||||
Args:
|
||||
agent_fns: List of (name, fn) tuples.
|
||||
label: Label for logging.
|
||||
"""
|
||||
|
||||
async def parallel_node(state):
|
||||
t0 = time.time()
|
||||
|
||||
async def run_one(name, fn):
|
||||
logger.debug("[%s] %s starting", label, name)
|
||||
result = await asyncio.to_thread(fn, state)
|
||||
logger.debug("[%s] %s done (%.1fs)", label, name, time.time() - t0)
|
||||
return result
|
||||
|
||||
tasks = [run_one(name, fn) for name, fn in agent_fns]
|
||||
results = await asyncio.gather(*tasks, return_exceptions=True)
|
||||
|
||||
merged: Dict[str, Any] = {}
|
||||
all_flags: list = []
|
||||
for (name, _), result in zip(agent_fns, results):
|
||||
if isinstance(result, Exception):
|
||||
logger.error("[%s] %s failed: %s", label, name, result)
|
||||
continue
|
||||
flags = result.pop("global_flags", [])
|
||||
all_flags.extend(flags)
|
||||
merged.update(result)
|
||||
if all_flags:
|
||||
merged["global_flags"] = all_flags
|
||||
|
||||
logger.info("[%s] completed in %.1fs", label, time.time() - t0)
|
||||
return merged
|
||||
|
||||
return parallel_node
|
||||
|
|
|
|||
|
|
@ -1,31 +1,31 @@
|
|||
# TradingAgents/graph/signal_processing.py
|
||||
|
||||
from langchain_openai import ChatOpenAI
|
||||
|
||||
|
||||
class SignalProcessor:
    """Processes trading signals to extract actionable decisions."""

    def __init__(self, quick_thinking_llm: "ChatOpenAI"):
        """Initialize with an LLM for processing.

        Args:
            quick_thinking_llm: Chat model used for extraction; only
                ``invoke(messages).content`` is required of it.
        """
        self.quick_thinking_llm = quick_thinking_llm

    def process_signal(self, full_signal: str) -> str:
        """
        Process a full trading signal to extract the core decision.

        Args:
            full_signal: Complete trading signal text

        Returns:
            Extracted decision (BUY, SELL, or HOLD), stripped of surrounding
            whitespace the model may emit.
        """
        messages = [
            (
                "system",
                "You are an efficient assistant designed to analyze paragraphs or financial reports provided by a group of analysts. Your task is to extract the investment decision: SELL, BUY, or HOLD. Provide only the extracted decision (SELL, BUY, or HOLD) as your output, without adding any additional text or information.",
            ),
            ("human", full_signal),
        ]

        # Strip so downstream equality checks (e.g. decision == "BUY") are not
        # broken by a trailing newline or padding from the model.
        return self.quick_thinking_llm.invoke(messages).content.strip()
|
||||
# TradingAgents/graph/signal_processing.py
|
||||
|
||||
from langchain_openai import ChatOpenAI
|
||||
|
||||
|
||||
class SignalProcessor:
    """Reduces free-form analyst output to a discrete trading decision."""

    def __init__(self, quick_thinking_llm: ChatOpenAI):
        """Store the lightweight LLM used for extraction."""
        self.quick_thinking_llm = quick_thinking_llm

    def process_signal(self, full_signal: str) -> str:
        """
        Process a full trading signal to extract the core decision.

        Args:
            full_signal: Complete trading signal text

        Returns:
            Extracted decision (BUY, SELL, or HOLD)
        """
        prompt = [
            (
                "system",
                "You are an efficient assistant designed to analyze paragraphs or financial reports provided by a group of analysts. Your task is to extract the investment decision: SELL, BUY, or HOLD. Provide only the extracted decision (SELL, BUY, or HOLD) as your output, without adding any additional text or information.",
            ),
            ("human", full_signal),
        ]
        response = self.quick_thinking_llm.invoke(prompt)
        return response.content
|
||||
|
|
|
|||
|
|
@ -1,198 +1,198 @@
|
|||
"""Main orchestrator for the structured equity ranking engine.
|
||||
|
||||
Replaces the old TradingAgentsGraph with a tiered Pydantic-based pipeline.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import os
|
||||
import logging
|
||||
from datetime import date
|
||||
from pathlib import Path
|
||||
from typing import Any, Dict, List, Optional
|
||||
|
||||
from tradingagents.default_config import DEFAULT_CONFIG
|
||||
from tradingagents.llm_clients import create_llm_client
|
||||
from tradingagents.dataflows.config import set_config
|
||||
|
||||
from .setup import StructuredGraphSetup
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class TradingAgentsGraph:
    """Structured equity ranking engine built on LangGraph."""

    def __init__(
        self,
        selected_analysts=None,  # ignored — all agents run in structured pipeline
        debug=False,
        config: Optional[Dict[str, Any]] = None,
        callbacks: Optional[List] = None,
    ):
        """Build the LLM clients and compile the structured pipeline graph.

        Args:
            selected_analysts: Accepted only for backward compatibility; unused.
            debug: When True, ``propagate`` streams intermediate states.
            config: Configuration mapping; falls back to ``DEFAULT_CONFIG``.
            callbacks: Optional callback handlers forwarded to both LLM clients.
        """
        self.debug = debug
        self.config = config or DEFAULT_CONFIG
        self.callbacks = callbacks or []

        # Make the active config visible to the dataflows layer.
        set_config(self.config)

        # Ensure the on-disk cache directory exists before any dataflow runs.
        os.makedirs(
            os.path.join(self.config["project_dir"], "dataflows/data_cache"),
            exist_ok=True,
        )

        # Initialize LLMs
        llm_kwargs = self._get_provider_kwargs()
        if self.callbacks:
            llm_kwargs["callbacks"] = self.callbacks

        deep_client = create_llm_client(
            provider=self.config["llm_provider"],
            model=self.config["deep_think_llm"],
            base_url=self.config.get("backend_url"),
            **llm_kwargs,
        )
        quick_client = create_llm_client(
            provider=self.config["llm_provider"],
            model=self.config["quick_think_llm"],
            base_url=self.config.get("backend_url"),
            **llm_kwargs,
        )

        self.deep_thinking_llm = deep_client.get_llm()
        self.quick_thinking_llm = quick_client.get_llm()

        # Build the structured pipeline graph
        graph_setup = StructuredGraphSetup(
            self.quick_thinking_llm, self.deep_thinking_llm
        )
        self.graph = graph_setup.setup_graph()

        # State tracking
        self.curr_state = None  # final pipeline state of the most recent run
        self.ticker = None  # ticker of the most recent run

    def _get_provider_kwargs(self) -> Dict[str, Any]:
        """Return provider-specific LLM kwargs derived from the config."""
        kwargs = {}
        provider = self.config.get("llm_provider", "").lower()
        if provider == "google":
            thinking_level = self.config.get("google_thinking_level")
            if thinking_level:
                kwargs["thinking_level"] = thinking_level
        elif provider == "openai":
            reasoning_effort = self.config.get("openai_reasoning_effort")
            if reasoning_effort:
                kwargs["reasoning_effort"] = reasoning_effort
        return kwargs

    async def propagate(self, company_name: str, trade_date: str):
        """Run the structured pipeline for a company (async — parallel nodes)."""
        # NOTE(review): asyncio looks unused in this method — confirm before removing.
        import asyncio

        self.ticker = company_name
        init_state = self._create_initial_state(company_name, trade_date)
        args = {"config": {"recursion_limit": 50}}

        if self.debug:
            # Stream every intermediate state; the last chunk is the result.
            trace = []
            async for chunk in self.graph.astream(init_state, stream_mode="values", **args):
                trace.append(chunk)
            final_state = trace[-1] if trace else init_state
        else:
            final_state = await self.graph.ainvoke(init_state, **args)

        self.curr_state = final_state
        self._log_state(trade_date, final_state)

        # Fall back to "AVOID" when the pipeline produced no final decision.
        decision = final_state.get("final_decision") or {}
        signal = decision.get("action", "AVOID")
        return final_state, signal

    def _create_initial_state(self, ticker: str, trade_date: str) -> Dict[str, Any]:
        """Return a fresh pipeline state with every analysis slot empty."""
        return {
            "ticker": ticker.upper(),
            "trade_date": str(trade_date),
            "validation": None,
            "company_card": None,
            "macro": None,
            "liquidity": None,
            "sector_rotation": None,
            "business_quality": None,
            "institutional_flow": None,
            "valuation": None,
            "entry_timing": None,
            "earnings_revisions": None,
            "backlog": None,
            "crowding": None,
            "archetype": None,
            "master_score": None,
            "adjusted_score": None,
            "position_role": None,
            "theme_substitution": None,
            "position_replacement": None,
            "bull_case": None,
            "bear_case": None,
            "debate": None,
            "risk": None,
            "final_decision": None,
            "hard_veto": False,
            "hard_veto_reason": None,
            "global_flags": [],
        }

    def _log_state(self, trade_date: str, state: Dict[str, Any]):
        """Log the final state to JSON."""
        # Snapshot of every pipeline slot; written under eval_results/<ticker>/.
        log_data = {
            "ticker": state.get("ticker"),
            "trade_date": str(trade_date),
            "master_score": state.get("master_score"),
            "adjusted_score": state.get("adjusted_score"),
            "position_role": state.get("position_role"),
            "hard_veto": state.get("hard_veto"),
            "validation": state.get("validation"),
            "company_card": state.get("company_card"),
            "macro": state.get("macro"),
            "liquidity": state.get("liquidity"),
            "business_quality": state.get("business_quality"),
            "institutional_flow": state.get("institutional_flow"),
            "valuation": state.get("valuation"),
            "entry_timing": state.get("entry_timing"),
            "earnings_revisions": state.get("earnings_revisions"),
            "sector_rotation": state.get("sector_rotation"),
            "backlog": state.get("backlog"),
            "crowding": state.get("crowding"),
            "archetype": state.get("archetype"),
            "theme_substitution": state.get("theme_substitution"),
            "position_replacement": state.get("position_replacement"),
            "bull_case": state.get("bull_case"),
            "bear_case": state.get("bear_case"),
            "debate": state.get("debate"),
            "risk": state.get("risk"),
            "final_decision": state.get("final_decision"),
        }

        directory = Path(f"eval_results/{self.ticker}/StructuredPipeline_logs/")
        directory.mkdir(parents=True, exist_ok=True)

        filepath = directory / f"analysis_{trade_date}.json"
        # default=str stringifies non-JSON-serializable values (dates, models).
        with open(filepath, "w") as f:
            json.dump(log_data, f, indent=2, default=str)
        logger.info("State logged to %s", filepath)

    def process_signal(self, decision_text: str) -> str:
        """Extract signal from decision text (legacy compatibility)."""
        if isinstance(decision_text, dict):
            return decision_text.get("action", "AVOID")
        text = str(decision_text).upper()
        # Keyword priority when several appear: BUY > SELL > HOLD.
        if "BUY" in text:
            return "BUY"
        if "SELL" in text:
            return "SELL"
        if "HOLD" in text:
            return "HOLD"
        return "AVOID"

    def reflect_and_remember(self, returns_losses):
        """No-op for structured pipeline (no BM25 memory)."""
        pass
|
||||
"""Main orchestrator for the structured equity ranking engine.
|
||||
|
||||
Replaces the old TradingAgentsGraph with a tiered Pydantic-based pipeline.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import os
|
||||
import logging
|
||||
from datetime import date
|
||||
from pathlib import Path
|
||||
from typing import Any, Dict, List, Optional
|
||||
|
||||
from tradingagents.default_config import DEFAULT_CONFIG
|
||||
from tradingagents.llm_clients import create_llm_client
|
||||
from tradingagents.dataflows.config import set_config
|
||||
|
||||
from .setup import StructuredGraphSetup
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class TradingAgentsGraph:
    """Structured equity ranking engine built on LangGraph."""

    def __init__(
        self,
        selected_analysts=None,  # ignored — all agents run in structured pipeline
        debug=False,
        config: Optional[Dict[str, Any]] = None,
        callbacks: Optional[List] = None,
    ):
        """Build the LLM clients and compile the structured pipeline graph.

        Args:
            selected_analysts: Accepted only for backward compatibility; unused.
            debug: When True, ``propagate`` streams intermediate states.
            config: Configuration mapping; falls back to ``DEFAULT_CONFIG``.
            callbacks: Optional callback handlers forwarded to both LLM clients.
        """
        self.debug = debug
        self.config = config or DEFAULT_CONFIG
        self.callbacks = callbacks or []

        # Make the active config visible to the dataflows layer.
        set_config(self.config)

        # Ensure the on-disk cache directory exists before any dataflow runs.
        os.makedirs(
            os.path.join(self.config["project_dir"], "dataflows/data_cache"),
            exist_ok=True,
        )

        # Initialize LLMs
        llm_kwargs = self._get_provider_kwargs()
        if self.callbacks:
            llm_kwargs["callbacks"] = self.callbacks

        deep_client = create_llm_client(
            provider=self.config["llm_provider"],
            model=self.config["deep_think_llm"],
            base_url=self.config.get("backend_url"),
            **llm_kwargs,
        )
        quick_client = create_llm_client(
            provider=self.config["llm_provider"],
            model=self.config["quick_think_llm"],
            base_url=self.config.get("backend_url"),
            **llm_kwargs,
        )

        self.deep_thinking_llm = deep_client.get_llm()
        self.quick_thinking_llm = quick_client.get_llm()

        # Build the structured pipeline graph
        graph_setup = StructuredGraphSetup(
            self.quick_thinking_llm, self.deep_thinking_llm
        )
        self.graph = graph_setup.setup_graph()

        # State tracking
        self.curr_state = None  # final pipeline state of the most recent run
        self.ticker = None  # ticker of the most recent run

    def _get_provider_kwargs(self) -> Dict[str, Any]:
        """Return provider-specific LLM kwargs derived from the config."""
        kwargs = {}
        provider = self.config.get("llm_provider", "").lower()
        if provider == "google":
            thinking_level = self.config.get("google_thinking_level")
            if thinking_level:
                kwargs["thinking_level"] = thinking_level
        elif provider == "openai":
            reasoning_effort = self.config.get("openai_reasoning_effort")
            if reasoning_effort:
                kwargs["reasoning_effort"] = reasoning_effort
        return kwargs

    async def propagate(self, company_name: str, trade_date: str):
        """Run the structured pipeline for a company (async — parallel nodes)."""
        # NOTE(review): asyncio looks unused in this method — confirm before removing.
        import asyncio

        self.ticker = company_name
        init_state = self._create_initial_state(company_name, trade_date)
        args = {"config": {"recursion_limit": 50}}

        if self.debug:
            # Stream every intermediate state; the last chunk is the result.
            trace = []
            async for chunk in self.graph.astream(init_state, stream_mode="values", **args):
                trace.append(chunk)
            final_state = trace[-1] if trace else init_state
        else:
            final_state = await self.graph.ainvoke(init_state, **args)

        self.curr_state = final_state
        self._log_state(trade_date, final_state)

        # Fall back to "AVOID" when the pipeline produced no final decision.
        decision = final_state.get("final_decision") or {}
        signal = decision.get("action", "AVOID")
        return final_state, signal

    def _create_initial_state(self, ticker: str, trade_date: str) -> Dict[str, Any]:
        """Return a fresh pipeline state with every analysis slot empty."""
        return {
            "ticker": ticker.upper(),
            "trade_date": str(trade_date),
            "validation": None,
            "company_card": None,
            "macro": None,
            "liquidity": None,
            "sector_rotation": None,
            "business_quality": None,
            "institutional_flow": None,
            "valuation": None,
            "entry_timing": None,
            "earnings_revisions": None,
            "backlog": None,
            "crowding": None,
            "archetype": None,
            "master_score": None,
            "adjusted_score": None,
            "position_role": None,
            "theme_substitution": None,
            "position_replacement": None,
            "bull_case": None,
            "bear_case": None,
            "debate": None,
            "risk": None,
            "final_decision": None,
            "hard_veto": False,
            "hard_veto_reason": None,
            "global_flags": [],
        }

    def _log_state(self, trade_date: str, state: Dict[str, Any]):
        """Log the final state to JSON."""
        # Snapshot of every pipeline slot; written under eval_results/<ticker>/.
        log_data = {
            "ticker": state.get("ticker"),
            "trade_date": str(trade_date),
            "master_score": state.get("master_score"),
            "adjusted_score": state.get("adjusted_score"),
            "position_role": state.get("position_role"),
            "hard_veto": state.get("hard_veto"),
            "validation": state.get("validation"),
            "company_card": state.get("company_card"),
            "macro": state.get("macro"),
            "liquidity": state.get("liquidity"),
            "business_quality": state.get("business_quality"),
            "institutional_flow": state.get("institutional_flow"),
            "valuation": state.get("valuation"),
            "entry_timing": state.get("entry_timing"),
            "earnings_revisions": state.get("earnings_revisions"),
            "sector_rotation": state.get("sector_rotation"),
            "backlog": state.get("backlog"),
            "crowding": state.get("crowding"),
            "archetype": state.get("archetype"),
            "theme_substitution": state.get("theme_substitution"),
            "position_replacement": state.get("position_replacement"),
            "bull_case": state.get("bull_case"),
            "bear_case": state.get("bear_case"),
            "debate": state.get("debate"),
            "risk": state.get("risk"),
            "final_decision": state.get("final_decision"),
        }

        directory = Path(f"eval_results/{self.ticker}/StructuredPipeline_logs/")
        directory.mkdir(parents=True, exist_ok=True)

        filepath = directory / f"analysis_{trade_date}.json"
        # default=str stringifies non-JSON-serializable values (dates, models).
        with open(filepath, "w") as f:
            json.dump(log_data, f, indent=2, default=str)
        logger.info("State logged to %s", filepath)

    def process_signal(self, decision_text: str) -> str:
        """Extract signal from decision text (legacy compatibility)."""
        if isinstance(decision_text, dict):
            return decision_text.get("action", "AVOID")
        text = str(decision_text).upper()
        # Keyword priority when several appear: BUY > SELL > HOLD.
        if "BUY" in text:
            return "BUY"
        if "SELL" in text:
            return "SELL"
        if "HOLD" in text:
            return "HOLD"
        return "AVOID"

    def reflect_and_remember(self, returns_losses):
        """No-op for structured pipeline (no BM25 memory)."""
        pass
|
||||
|
|
|
|||
|
|
@ -1,24 +1,24 @@
|
|||
# LLM Clients - Consistency Improvements
|
||||
|
||||
## Issues to Fix
|
||||
|
||||
### 1. `validate_model()` is never called
|
||||
- Add validation call in `get_llm()` with warning (not error) for unknown models
|
||||
|
||||
### 2. Inconsistent parameter handling
|
||||
| Client | API Key Param | Special Params |
|
||||
|--------|---------------|----------------|
|
||||
| OpenAI | `api_key` | `reasoning_effort` |
|
||||
| Anthropic | `api_key` | `thinking_config` → `thinking` |
|
||||
| Google | `google_api_key` | `thinking_budget` |
|
||||
|
||||
**Fix:** Standardize with unified `api_key` that maps to provider-specific keys
|
||||
|
||||
### 3. `base_url` accepted but ignored
|
||||
- `AnthropicClient`: accepts `base_url` but never uses it
|
||||
- `GoogleClient`: accepts `base_url` but never uses it (correct - Google doesn't support it)
|
||||
|
||||
**Fix:** Remove unused `base_url` from clients that don't support it
|
||||
|
||||
### 4. Update validators.py with models from CLI
|
||||
- Sync `VALID_MODELS` dict with CLI model options after Feature 2 is complete
|
||||
# LLM Clients - Consistency Improvements
|
||||
|
||||
## Issues to Fix
|
||||
|
||||
### 1. `validate_model()` is never called
|
||||
- Add validation call in `get_llm()` with warning (not error) for unknown models
|
||||
|
||||
### 2. Inconsistent parameter handling
|
||||
| Client | API Key Param | Special Params |
|
||||
|--------|---------------|----------------|
|
||||
| OpenAI | `api_key` | `reasoning_effort` |
|
||||
| Anthropic | `api_key` | `thinking_config` → `thinking` |
|
||||
| Google | `google_api_key` | `thinking_budget` |
|
||||
|
||||
**Fix:** Standardize with unified `api_key` that maps to provider-specific keys
|
||||
|
||||
### 3. `base_url` accepted but ignored
|
||||
- `AnthropicClient`: accepts `base_url` but never uses it
|
||||
- `GoogleClient`: accepts `base_url` but never uses it (correct - Google doesn't support it)
|
||||
|
||||
**Fix:** Remove unused `base_url` from clients that don't support it
|
||||
|
||||
### 4. Update validators.py with models from CLI
|
||||
- Sync `VALID_MODELS` dict with CLI model options after Feature 2 is complete
|
||||
|
|
|
|||
|
|
@ -1,4 +1,4 @@
|
|||
from .base_client import BaseLLMClient
|
||||
from .factory import create_llm_client
|
||||
|
||||
# Public API of the llm_clients package.
__all__ = ["BaseLLMClient", "create_llm_client"]
|
||||
from .base_client import BaseLLMClient
|
||||
from .factory import create_llm_client
|
||||
|
||||
# Public API of the llm_clients package.
__all__ = ["BaseLLMClient", "create_llm_client"]
|
||||
|
|
|
|||
|
|
@ -1,27 +1,27 @@
|
|||
from typing import Any, Optional
|
||||
|
||||
from langchain_anthropic import ChatAnthropic
|
||||
|
||||
from .base_client import BaseLLMClient
|
||||
from .validators import validate_model
|
||||
|
||||
|
||||
class AnthropicClient(BaseLLMClient):
    """Client for Anthropic Claude models."""

    # kwargs forwarded verbatim to ChatAnthropic when present.
    _PASSTHROUGH_KEYS = ("timeout", "max_retries", "api_key", "max_tokens", "callbacks")

    def __init__(self, model: str, base_url: Optional[str] = None, **kwargs):
        super().__init__(model, base_url, **kwargs)

    def get_llm(self) -> Any:
        """Build and return a ChatAnthropic configured from stored kwargs."""
        config = {"model": self.model}
        config.update(
            (key, self.kwargs[key])
            for key in self._PASSTHROUGH_KEYS
            if key in self.kwargs
        )
        return ChatAnthropic(**config)

    def validate_model(self) -> bool:
        """Check the model name against the Anthropic allow-list."""
        return validate_model("anthropic", self.model)
|
||||
from typing import Any, Optional
|
||||
|
||||
from langchain_anthropic import ChatAnthropic
|
||||
|
||||
from .base_client import BaseLLMClient
|
||||
from .validators import validate_model
|
||||
|
||||
|
||||
class AnthropicClient(BaseLLMClient):
    """Client for Anthropic Claude models."""

    # kwargs forwarded verbatim to ChatAnthropic when present.
    _PASSTHROUGH_KEYS = ("timeout", "max_retries", "api_key", "max_tokens", "callbacks")

    def __init__(self, model: str, base_url: Optional[str] = None, **kwargs):
        super().__init__(model, base_url, **kwargs)

    def get_llm(self) -> Any:
        """Build and return a ChatAnthropic configured from stored kwargs."""
        config = {"model": self.model}
        config.update(
            (key, self.kwargs[key])
            for key in self._PASSTHROUGH_KEYS
            if key in self.kwargs
        )
        return ChatAnthropic(**config)

    def validate_model(self) -> bool:
        """Check the model name against the Anthropic allow-list."""
        return validate_model("anthropic", self.model)
|
||||
|
|
|
|||
|
|
@ -1,21 +1,21 @@
|
|||
from abc import ABC, abstractmethod
|
||||
from typing import Any, Optional
|
||||
|
||||
|
||||
class BaseLLMClient(ABC):
    """Common interface for provider-specific LLM client wrappers.

    Holds the target model name, an optional endpoint override, and any
    extra provider-specific keyword arguments for subclasses to consume.
    """

    def __init__(self, model: str, base_url: Optional[str] = None, **kwargs):
        # Store everything untouched; subclasses decide which kwargs they honor.
        self.model = model
        self.base_url = base_url
        self.kwargs = kwargs

    @abstractmethod
    def get_llm(self) -> Any:
        """Return the configured LLM instance."""

    @abstractmethod
    def validate_model(self) -> bool:
        """Validate that the model is supported by this client."""
|
||||
from abc import ABC, abstractmethod
|
||||
from typing import Any, Optional
|
||||
|
||||
|
||||
class BaseLLMClient(ABC):
    """Common interface for provider-specific LLM client wrappers.

    Holds the target model name, an optional endpoint override, and any
    extra provider-specific keyword arguments for subclasses to consume.
    """

    def __init__(self, model: str, base_url: Optional[str] = None, **kwargs):
        # Store everything untouched; subclasses decide which kwargs they honor.
        self.model = model
        self.base_url = base_url
        self.kwargs = kwargs

    @abstractmethod
    def get_llm(self) -> Any:
        """Return the configured LLM instance."""

    @abstractmethod
    def validate_model(self) -> bool:
        """Validate that the model is supported by this client."""
|
||||
|
|
|
|||
|
|
@ -1,43 +1,43 @@
|
|||
from typing import Optional
|
||||
|
||||
from .base_client import BaseLLMClient
|
||||
from .openai_client import OpenAIClient
|
||||
from .anthropic_client import AnthropicClient
|
||||
from .google_client import GoogleClient
|
||||
|
||||
|
||||
def create_llm_client(
    provider: str,
    model: str,
    base_url: Optional[str] = None,
    **kwargs,
) -> BaseLLMClient:
    """Create an LLM client for the specified provider.

    Args:
        provider: LLM provider (openai, anthropic, google, xai, ollama, openrouter)
        model: Model name/identifier
        base_url: Optional base URL for API endpoint
        **kwargs: Additional provider-specific arguments

    Returns:
        Configured BaseLLMClient instance

    Raises:
        ValueError: If provider is not supported
    """
    name = provider.lower()

    # All OpenAI-compatible providers (including xAI) share one client class.
    if name in ("openai", "ollama", "openrouter", "xai"):
        return OpenAIClient(model, base_url, provider=name, **kwargs)
    if name == "anthropic":
        return AnthropicClient(model, base_url, **kwargs)
    if name == "google":
        return GoogleClient(model, base_url, **kwargs)

    raise ValueError(f"Unsupported LLM provider: {provider}")
|
||||
from typing import Optional
|
||||
|
||||
from .base_client import BaseLLMClient
|
||||
from .openai_client import OpenAIClient
|
||||
from .anthropic_client import AnthropicClient
|
||||
from .google_client import GoogleClient
|
||||
|
||||
|
||||
def create_llm_client(
    provider: str,
    model: str,
    base_url: Optional[str] = None,
    **kwargs,
) -> BaseLLMClient:
    """Create an LLM client for the specified provider.

    Args:
        provider: LLM provider (openai, anthropic, google, xai, ollama, openrouter)
        model: Model name/identifier
        base_url: Optional base URL for API endpoint
        **kwargs: Additional provider-specific arguments

    Returns:
        Configured BaseLLMClient instance

    Raises:
        ValueError: If provider is not supported
    """
    name = provider.lower()

    # All OpenAI-compatible providers (including xAI) share one client class.
    if name in ("openai", "ollama", "openrouter", "xai"):
        return OpenAIClient(model, base_url, provider=name, **kwargs)
    if name == "anthropic":
        return AnthropicClient(model, base_url, **kwargs)
    if name == "google":
        return GoogleClient(model, base_url, **kwargs)

    raise ValueError(f"Unsupported LLM provider: {provider}")
|
||||
|
|
|
|||
|
|
@ -1,65 +1,65 @@
|
|||
from typing import Any, Optional
|
||||
|
||||
from langchain_google_genai import ChatGoogleGenerativeAI
|
||||
|
||||
from .base_client import BaseLLMClient
|
||||
from .validators import validate_model
|
||||
|
||||
|
||||
class NormalizedChatGoogleGenerativeAI(ChatGoogleGenerativeAI):
    """ChatGoogleGenerativeAI with normalized content output.

    Gemini 3 models return content as list: [{'type': 'text', 'text': '...'}]
    This normalizes to string for consistent downstream handling.
    """

    def _normalize_content(self, response):
        # Only flatten list-shaped payloads; plain strings pass through.
        if isinstance(response.content, list):
            parts = []
            for item in response.content:
                if isinstance(item, dict) and item.get("type") == "text":
                    parts.append(item.get("text", ""))
                elif isinstance(item, str):
                    parts.append(item)
            response.content = "\n".join(part for part in parts if part)
        return response

    def invoke(self, input, config=None, **kwargs):
        return self._normalize_content(super().invoke(input, config, **kwargs))
|
||||
|
||||
|
||||
class GoogleClient(BaseLLMClient):
    """Client for Google Gemini models."""

    def __init__(self, model: str, base_url: Optional[str] = None, **kwargs):
        super().__init__(model, base_url, **kwargs)

    def get_llm(self) -> Any:
        """Build a NormalizedChatGoogleGenerativeAI from the stored kwargs."""
        config = {"model": self.model}
        for key in ("timeout", "max_retries", "google_api_key", "callbacks"):
            if key in self.kwargs:
                config[key] = self.kwargs[key]

        # Map thinking_level to the API param each model family expects:
        #   Gemini 3 Pro: low, high; Gemini 3 Flash: minimal..high;
        #   Gemini 2.5: thinking_budget (0 = disable, -1 = dynamic).
        level = self.kwargs.get("thinking_level")
        if level:
            name = self.model.lower()
            if "gemini-3" in name:
                # Gemini 3 Pro has no "minimal" tier; degrade to "low".
                config["thinking_level"] = (
                    "low" if "pro" in name and level == "minimal" else level
                )
            else:
                config["thinking_budget"] = -1 if level == "high" else 0

        return NormalizedChatGoogleGenerativeAI(**config)

    def validate_model(self) -> bool:
        """Check the model name against the Google allow-list."""
        return validate_model("google", self.model)
|
||||
from typing import Any, Optional
|
||||
|
||||
from langchain_google_genai import ChatGoogleGenerativeAI
|
||||
|
||||
from .base_client import BaseLLMClient
|
||||
from .validators import validate_model
|
||||
|
||||
|
||||
class NormalizedChatGoogleGenerativeAI(ChatGoogleGenerativeAI):
    """ChatGoogleGenerativeAI with normalized content output.

    Gemini 3 models return content as list: [{'type': 'text', 'text': '...'}]
    This normalizes to string for consistent downstream handling.
    """

    def _normalize_content(self, response):
        # Only flatten list-shaped payloads; plain strings pass through.
        if isinstance(response.content, list):
            parts = []
            for item in response.content:
                if isinstance(item, dict) and item.get("type") == "text":
                    parts.append(item.get("text", ""))
                elif isinstance(item, str):
                    parts.append(item)
            response.content = "\n".join(part for part in parts if part)
        return response

    def invoke(self, input, config=None, **kwargs):
        return self._normalize_content(super().invoke(input, config, **kwargs))
|
||||
|
||||
|
||||
class GoogleClient(BaseLLMClient):
    """Client for Google Gemini models."""

    def __init__(self, model: str, base_url: Optional[str] = None, **kwargs):
        super().__init__(model, base_url, **kwargs)

    def get_llm(self) -> Any:
        """Build a NormalizedChatGoogleGenerativeAI from the stored kwargs."""
        config = {"model": self.model}
        for key in ("timeout", "max_retries", "google_api_key", "callbacks"):
            if key in self.kwargs:
                config[key] = self.kwargs[key]

        # Map thinking_level to the API param each model family expects:
        #   Gemini 3 Pro: low, high; Gemini 3 Flash: minimal..high;
        #   Gemini 2.5: thinking_budget (0 = disable, -1 = dynamic).
        level = self.kwargs.get("thinking_level")
        if level:
            name = self.model.lower()
            if "gemini-3" in name:
                # Gemini 3 Pro has no "minimal" tier; degrade to "low".
                config["thinking_level"] = (
                    "low" if "pro" in name and level == "minimal" else level
                )
            else:
                config["thinking_budget"] = -1 if level == "high" else 0

        return NormalizedChatGoogleGenerativeAI(**config)

    def validate_model(self) -> bool:
        """Check the model name against the Google allow-list."""
        return validate_model("google", self.model)
|
||||
|
|
|
|||
|
|
@ -1,75 +1,75 @@
|
|||
import os
|
||||
from typing import Any, Optional
|
||||
|
||||
from langchain_openai import ChatOpenAI
|
||||
|
||||
from .base_client import BaseLLMClient
|
||||
from .validators import validate_model
|
||||
|
||||
|
||||
class UnifiedChatOpenAI(ChatOpenAI):
    """ChatOpenAI subclass that strips incompatible params for certain models."""

    def __init__(self, **kwargs):
        if self._is_reasoning_model(kwargs.get("model", "")):
            # o-series / gpt-5 models reject sampling parameters.
            for banned in ("temperature", "top_p"):
                kwargs.pop(banned, None)
        super().__init__(**kwargs)

    @staticmethod
    def _is_reasoning_model(model: str) -> bool:
        """Return True for models that do not accept temperature/top_p."""
        name = model.lower()
        return name.startswith(("o1", "o3")) or "gpt-5" in name
|
||||
|
||||
|
||||
class OpenAIClient(BaseLLMClient):
    """Client for OpenAI, Ollama, OpenRouter, and xAI providers."""

    # Providers with a fixed endpoint whose key comes from the environment.
    _ENV_ENDPOINTS = {
        "xai": ("https://api.x.ai/v1", "XAI_API_KEY"),
        "openrouter": ("https://openrouter.ai/api/v1", "OPENROUTER_API_KEY"),
    }

    def __init__(
        self,
        model: str,
        base_url: Optional[str] = None,
        provider: str = "openai",
        **kwargs,
    ):
        super().__init__(model, base_url, **kwargs)
        self.provider = provider.lower()

    def get_llm(self) -> Any:
        """Build a UnifiedChatOpenAI with provider-specific endpoint and key."""
        config = {"model": self.model}

        if self.provider in self._ENV_ENDPOINTS:
            endpoint, env_var = self._ENV_ENDPOINTS[self.provider]
            config["base_url"] = endpoint
            env_key = os.environ.get(env_var)
            if env_key:
                config["api_key"] = env_key
        elif self.provider == "ollama":
            config["base_url"] = "http://localhost:11434/v1"
            config["api_key"] = "ollama"  # Ollama doesn't require auth
        elif self.base_url:
            config["base_url"] = self.base_url
            env_key = os.environ.get("OPENAI_API_KEY")
            if env_key:
                config["api_key"] = env_key

        # Explicit kwargs take precedence over environment-derived values.
        for key in ("timeout", "max_retries", "reasoning_effort", "api_key", "callbacks"):
            if key in self.kwargs:
                config[key] = self.kwargs[key]

        return UnifiedChatOpenAI(**config)

    def validate_model(self) -> bool:
        """Check the model name against the provider's allow-list."""
        return validate_model(self.provider, self.model)
|
||||
import os
|
||||
from typing import Any, Optional
|
||||
|
||||
from langchain_openai import ChatOpenAI
|
||||
|
||||
from .base_client import BaseLLMClient
|
||||
from .validators import validate_model
|
||||
|
||||
|
||||
class UnifiedChatOpenAI(ChatOpenAI):
    """ChatOpenAI subclass that strips incompatible params for certain models."""

    def __init__(self, **kwargs):
        if self._is_reasoning_model(kwargs.get("model", "")):
            # o-series / gpt-5 models reject sampling parameters.
            for banned in ("temperature", "top_p"):
                kwargs.pop(banned, None)
        super().__init__(**kwargs)

    @staticmethod
    def _is_reasoning_model(model: str) -> bool:
        """Return True for models that do not accept temperature/top_p."""
        name = model.lower()
        return name.startswith(("o1", "o3")) or "gpt-5" in name
|
||||
|
||||
|
||||
class OpenAIClient(BaseLLMClient):
    """Client for OpenAI, Ollama, OpenRouter, and xAI providers."""

    def __init__(
        self,
        model: str,
        base_url: Optional[str] = None,
        provider: str = "openai",
        **kwargs,
    ):
        super().__init__(model, base_url, **kwargs)
        # Lower-cased once so every provider comparison in get_llm() is
        # case-insensitive.
        self.provider = provider.lower()

    def get_llm(self) -> Any:
        """Return configured ChatOpenAI instance."""
        llm_kwargs = {"model": self.model}

        # Known alternative providers get a fixed endpoint; the key is only
        # attached when the environment variable is set, so the SDK's own
        # credential resolution still applies otherwise.
        if self.provider == "xai":
            llm_kwargs["base_url"] = "https://api.x.ai/v1"
            api_key = os.environ.get("XAI_API_KEY")
            if api_key:
                llm_kwargs["api_key"] = api_key
        elif self.provider == "openrouter":
            llm_kwargs["base_url"] = "https://openrouter.ai/api/v1"
            api_key = os.environ.get("OPENROUTER_API_KEY")
            if api_key:
                llm_kwargs["api_key"] = api_key
        elif self.provider == "ollama":
            llm_kwargs["base_url"] = "http://localhost:11434/v1"
            llm_kwargs["api_key"] = "ollama"  # Ollama doesn't require auth
        elif self.base_url:
            # Custom endpoint for an otherwise-default provider.
            llm_kwargs["base_url"] = self.base_url
            api_key = os.environ.get("OPENAI_API_KEY")
            if api_key:
                llm_kwargs["api_key"] = api_key

        # Caller-supplied overrides win over anything derived above
        # (including an explicit api_key).
        for key in ("timeout", "max_retries", "reasoning_effort", "api_key", "callbacks"):
            if key in self.kwargs:
                llm_kwargs[key] = self.kwargs[key]

        return UnifiedChatOpenAI(**llm_kwargs)

    def validate_model(self) -> bool:
        """Validate model for the provider."""
        return validate_model(self.provider, self.model)
||||
|
|
|
|||
|
|
@ -1,82 +1,82 @@
|
|||
"""Model name validators for each provider.
|
||||
|
||||
Only validates model names - does NOT enforce limits.
|
||||
Let LLM providers use their own defaults for unspecified params.
|
||||
"""
|
||||
|
||||
# Known model names per provider, used only for name validation.
# Providers absent from this mapping (and the deliberately open
# ollama/openrouter catalogs) are accepted permissively by validate_model().
VALID_MODELS = {
    "openai": [
        # GPT-5 series (2025)
        "gpt-5.2",
        "gpt-5.1",
        "gpt-5",
        "gpt-5-mini",
        "gpt-5-nano",
        # GPT-4.1 series (2025)
        "gpt-4.1",
        "gpt-4.1-mini",
        "gpt-4.1-nano",
        # o-series reasoning models
        "o4-mini",
        "o3",
        "o3-mini",
        "o1",
        "o1-preview",
        # GPT-4o series (legacy but still supported)
        "gpt-4o",
        "gpt-4o-mini",
    ],
    "anthropic": [
        # Claude 4.5 series (2025)
        "claude-opus-4-5",
        "claude-sonnet-4-5",
        "claude-haiku-4-5",
        # Claude 4.x series
        "claude-opus-4-1-20250805",
        "claude-sonnet-4-20250514",
        # Claude 3.7 series
        "claude-3-7-sonnet-20250219",
        # Claude 3.5 series (legacy)
        "claude-3-5-haiku-20241022",
        "claude-3-5-sonnet-20241022",
    ],
    "google": [
        # Gemini 3 series (preview)
        "gemini-3-pro-preview",
        "gemini-3-flash-preview",
        # Gemini 2.5 series
        "gemini-2.5-pro",
        "gemini-2.5-flash",
        "gemini-2.5-flash-lite",
        # Gemini 2.0 series
        "gemini-2.0-flash",
        "gemini-2.0-flash-lite",
    ],
    "xai": [
        # Grok 4.1 series
        "grok-4-1-fast",
        "grok-4-1-fast-reasoning",
        "grok-4-1-fast-non-reasoning",
        # Grok 4 series
        "grok-4",
        "grok-4-0709",
        "grok-4-fast-reasoning",
        "grok-4-fast-non-reasoning",
    ],
}
||||
|
||||
|
||||
def validate_model(provider: str, model: str) -> bool:
    """Check if model name is valid for the given provider.

    For ollama, openrouter - any model is accepted.
    """
    name = provider.lower()

    # Open-ended catalogs — and any provider we keep no list for — are
    # accepted as-is; only providers with a known closed catalog are checked.
    if name in ("ollama", "openrouter") or name not in VALID_MODELS:
        return True

    return model in VALID_MODELS[name]
||||
"""Model name validators for each provider.
|
||||
|
||||
Only validates model names - does NOT enforce limits.
|
||||
Let LLM providers use their own defaults for unspecified params.
|
||||
"""
|
||||
|
||||
# Known model names per provider, consumed by validate_model() below.
# Missing providers (and the open ollama/openrouter catalogs) are accepted
# without checking.
VALID_MODELS = {
    "openai": [
        # GPT-5 series (2025)
        "gpt-5.2",
        "gpt-5.1",
        "gpt-5",
        "gpt-5-mini",
        "gpt-5-nano",
        # GPT-4.1 series (2025)
        "gpt-4.1",
        "gpt-4.1-mini",
        "gpt-4.1-nano",
        # o-series reasoning models
        "o4-mini",
        "o3",
        "o3-mini",
        "o1",
        "o1-preview",
        # GPT-4o series (legacy but still supported)
        "gpt-4o",
        "gpt-4o-mini",
    ],
    "anthropic": [
        # Claude 4.5 series (2025)
        "claude-opus-4-5",
        "claude-sonnet-4-5",
        "claude-haiku-4-5",
        # Claude 4.x series
        "claude-opus-4-1-20250805",
        "claude-sonnet-4-20250514",
        # Claude 3.7 series
        "claude-3-7-sonnet-20250219",
        # Claude 3.5 series (legacy)
        "claude-3-5-haiku-20241022",
        "claude-3-5-sonnet-20241022",
    ],
    "google": [
        # Gemini 3 series (preview)
        "gemini-3-pro-preview",
        "gemini-3-flash-preview",
        # Gemini 2.5 series
        "gemini-2.5-pro",
        "gemini-2.5-flash",
        "gemini-2.5-flash-lite",
        # Gemini 2.0 series
        "gemini-2.0-flash",
        "gemini-2.0-flash-lite",
    ],
    "xai": [
        # Grok 4.1 series
        "grok-4-1-fast",
        "grok-4-1-fast-reasoning",
        "grok-4-1-fast-non-reasoning",
        # Grok 4 series
        "grok-4",
        "grok-4-0709",
        "grok-4-fast-reasoning",
        "grok-4-fast-non-reasoning",
    ],
}
||||
|
||||
|
||||
def validate_model(provider: str, model: str) -> bool:
    """Check if model name is valid for the given provider.

    For ollama, openrouter - any model is accepted.
    """
    provider_lower = provider.lower()

    # Open-ended catalogs: any model id is allowed.
    if provider_lower in ("ollama", "openrouter"):
        return True

    # Unknown providers are accepted permissively rather than rejected.
    if provider_lower not in VALID_MODELS:
        return True

    return model in VALID_MODELS[provider_lower]
||||
|
|
|
|||
Loading…
Reference in New Issue