[add] OpenAIEmbeddings from langchain_openai

This commit is contained in:
kimheesu 2025-06-12 15:06:35 +09:00
parent 570644d939
commit ba5b6e8be8
43 changed files with 5230 additions and 5229 deletions

16
.gitignore vendored
View File

@@ -1,8 +1,8 @@
env/ env/
__pycache__/ __pycache__/
.DS_Store .DS_Store
*.csv *.csv
src/ src/
eval_results/ eval_results/
eval_data/ eval_data/
*.egg-info/ *.egg-info/

402
LICENSE
View File

@@ -1,201 +1,201 @@
Apache License Apache License
Version 2.0, January 2004 Version 2.0, January 2004
http://www.apache.org/licenses/ http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions. 1. Definitions.
"License" shall mean the terms and conditions for use, reproduction, "License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document. and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by "Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License. the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all "Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition, control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the "control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity. outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity "You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License. exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications, "Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation including but not limited to software source code, documentation
source, and configuration files. source, and configuration files.
"Object" form shall mean any form resulting from mechanical "Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation, not limited to compiled object code, generated documentation,
and conversions to other media types. and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or "Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work copyright notice that is included in or attached to the work
(an example is provided in the Appendix below). (an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object "Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of, separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof. the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including "Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted" the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems, communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution." designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity "Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work. subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of 2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual, this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of, copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form. Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of 3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual, this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made, (except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work, use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s) Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate granted to You under this License for that Work shall terminate
as of the date such litigation is filed. as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the 4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You modifications, and in Source or Object form, provided that You
meet the following conditions: meet the following conditions:
(a) You must give any other recipients of the Work or (a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices (b) You must cause any modified files to carry prominent notices
stating that You changed the files; and stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works (c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work, attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of excluding those notices that do not pertain to any part of
the Derivative Works; and the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its (d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or, documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed that such additional attribution notices cannot be construed
as modifying the License. as modifying the License.
You may add Your own copyright statement to Your modifications and You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use, for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License. the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise, 5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions. this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions. with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade 6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor, names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file. origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or 7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS, Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License. risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory, 8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise, whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special, liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill, Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages. has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing 9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer, the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity, and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify, of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability. of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work. APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]" boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier same "printed page" as the copyright notice for easier
identification within third-party archives. identification within third-party archives.
Copyright [yyyy] [name of copyright owner] Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License"); Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License. you may not use this file except in compliance with the License.
You may obtain a copy of the License at You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0 http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and See the License for the specific language governing permissions and
limitations under the License. limitations under the License.

426
README.md
View File

@@ -1,213 +1,213 @@
<p align="center"> <p align="center">
<img src="assets/TauricResearch.png" style="width: 60%; height: auto;"> <img src="assets/TauricResearch.png" style="width: 60%; height: auto;">
</p> </p>
<div align="center" style="line-height: 1;"> <div align="center" style="line-height: 1;">
<a href="https://arxiv.org/abs/2412.20138" target="_blank"><img alt="arXiv" src="https://img.shields.io/badge/arXiv-2412.20138-B31B1B?logo=arxiv"/></a> <a href="https://arxiv.org/abs/2412.20138" target="_blank"><img alt="arXiv" src="https://img.shields.io/badge/arXiv-2412.20138-B31B1B?logo=arxiv"/></a>
<a href="https://discord.com/invite/hk9PGKShPK" target="_blank"><img alt="Discord" src="https://img.shields.io/badge/Discord-TradingResearch-7289da?logo=discord&logoColor=white&color=7289da"/></a> <a href="https://discord.com/invite/hk9PGKShPK" target="_blank"><img alt="Discord" src="https://img.shields.io/badge/Discord-TradingResearch-7289da?logo=discord&logoColor=white&color=7289da"/></a>
<a href="./assets/wechat.png" target="_blank"><img alt="WeChat" src="https://img.shields.io/badge/WeChat-TauricResearch-brightgreen?logo=wechat&logoColor=white"/></a> <a href="./assets/wechat.png" target="_blank"><img alt="WeChat" src="https://img.shields.io/badge/WeChat-TauricResearch-brightgreen?logo=wechat&logoColor=white"/></a>
<a href="https://x.com/TauricResearch" target="_blank"><img alt="X Follow" src="https://img.shields.io/badge/X-TauricResearch-white?logo=x&logoColor=white"/></a> <a href="https://x.com/TauricResearch" target="_blank"><img alt="X Follow" src="https://img.shields.io/badge/X-TauricResearch-white?logo=x&logoColor=white"/></a>
<br> <br>
<a href="https://github.com/TauricResearch/" target="_blank"><img alt="Community" src="https://img.shields.io/badge/Join_GitHub_Community-TauricResearch-14C290?logo=discourse"/></a> <a href="https://github.com/TauricResearch/" target="_blank"><img alt="Community" src="https://img.shields.io/badge/Join_GitHub_Community-TauricResearch-14C290?logo=discourse"/></a>
</div> </div>
<div align="center"> <div align="center">
<!-- Keep these links. Translations will automatically update with the README. --> <!-- Keep these links. Translations will automatically update with the README. -->
<a href="https://www.readme-i18n.com/TauricResearch/TradingAgents?lang=de">Deutsch</a> | <a href="https://www.readme-i18n.com/TauricResearch/TradingAgents?lang=de">Deutsch</a> |
<a href="https://www.readme-i18n.com/TauricResearch/TradingAgents?lang=es">Español</a> | <a href="https://www.readme-i18n.com/TauricResearch/TradingAgents?lang=es">Español</a> |
<a href="https://www.readme-i18n.com/TauricResearch/TradingAgents?lang=fr">français</a> | <a href="https://www.readme-i18n.com/TauricResearch/TradingAgents?lang=fr">français</a> |
<a href="https://www.readme-i18n.com/TauricResearch/TradingAgents?lang=ja">日本語</a> | <a href="https://www.readme-i18n.com/TauricResearch/TradingAgents?lang=ja">日本語</a> |
<a href="https://www.readme-i18n.com/TauricResearch/TradingAgents?lang=ko">한국어</a> | <a href="https://www.readme-i18n.com/TauricResearch/TradingAgents?lang=ko">한국어</a> |
<a href="https://www.readme-i18n.com/TauricResearch/TradingAgents?lang=pt">Português</a> | <a href="https://www.readme-i18n.com/TauricResearch/TradingAgents?lang=pt">Português</a> |
<a href="https://www.readme-i18n.com/TauricResearch/TradingAgents?lang=ru">Русский</a> | <a href="https://www.readme-i18n.com/TauricResearch/TradingAgents?lang=ru">Русский</a> |
<a href="https://www.readme-i18n.com/TauricResearch/TradingAgents?lang=zh">中文</a> <a href="https://www.readme-i18n.com/TauricResearch/TradingAgents?lang=zh">中文</a>
</div> </div>
--- ---
# TradingAgents: Multi-Agents LLM Financial Trading Framework # TradingAgents: Multi-Agents LLM Financial Trading Framework
> 🎉 **TradingAgents** officially released! We have received numerous inquiries about the work, and we would like to express our thanks for the enthusiasm in our community. > 🎉 **TradingAgents** officially released! We have received numerous inquiries about the work, and we would like to express our thanks for the enthusiasm in our community.
> >
> So we decided to fully open-source the framework. Looking forward to building impactful projects with you! > So we decided to fully open-source the framework. Looking forward to building impactful projects with you!
<div align="center"> <div align="center">
<a href="https://www.star-history.com/#TauricResearch/TradingAgents&Date"> <a href="https://www.star-history.com/#TauricResearch/TradingAgents&Date">
<picture> <picture>
<source media="(prefers-color-scheme: dark)" srcset="https://api.star-history.com/svg?repos=TauricResearch/TradingAgents&type=Date&theme=dark" /> <source media="(prefers-color-scheme: dark)" srcset="https://api.star-history.com/svg?repos=TauricResearch/TradingAgents&type=Date&theme=dark" />
<source media="(prefers-color-scheme: light)" srcset="https://api.star-history.com/svg?repos=TauricResearch/TradingAgents&type=Date" /> <source media="(prefers-color-scheme: light)" srcset="https://api.star-history.com/svg?repos=TauricResearch/TradingAgents&type=Date" />
<img alt="TradingAgents Star History" src="https://api.star-history.com/svg?repos=TauricResearch/TradingAgents&type=Date" style="width: 80%; height: auto;" /> <img alt="TradingAgents Star History" src="https://api.star-history.com/svg?repos=TauricResearch/TradingAgents&type=Date" style="width: 80%; height: auto;" />
</picture> </picture>
</a> </a>
</div> </div>
<div align="center"> <div align="center">
🚀 [TradingAgents](#tradingagents-framework) | ⚡ [Installation & CLI](#installation-and-cli) | 🎬 [Demo](https://www.youtube.com/watch?v=90gr5lwjIho) | 📦 [Package Usage](#tradingagents-package) | 🤝 [Contributing](#contributing) | 📄 [Citation](#citation) 🚀 [TradingAgents](#tradingagents-framework) | ⚡ [Installation & CLI](#installation-and-cli) | 🎬 [Demo](https://www.youtube.com/watch?v=90gr5lwjIho) | 📦 [Package Usage](#tradingagents-package) | 🤝 [Contributing](#contributing) | 📄 [Citation](#citation)
</div> </div>
## TradingAgents Framework ## TradingAgents Framework
TradingAgents is a multi-agent trading framework that mirrors the dynamics of real-world trading firms. By deploying specialized LLM-powered agents: from fundamental analysts, sentiment experts, and technical analysts, to trader, risk management team, the platform collaboratively evaluates market conditions and informs trading decisions. Moreover, these agents engage in dynamic discussions to pinpoint the optimal strategy. TradingAgents is a multi-agent trading framework that mirrors the dynamics of real-world trading firms. By deploying specialized LLM-powered agents: from fundamental analysts, sentiment experts, and technical analysts, to trader, risk management team, the platform collaboratively evaluates market conditions and informs trading decisions. Moreover, these agents engage in dynamic discussions to pinpoint the optimal strategy.
<p align="center"> <p align="center">
<img src="assets/schema.png" style="width: 100%; height: auto;"> <img src="assets/schema.png" style="width: 100%; height: auto;">
</p> </p>
> TradingAgents framework is designed for research purposes. Trading performance may vary based on many factors, including the chosen backbone language models, model temperature, trading periods, the quality of data, and other non-deterministic factors. [It is not intended as financial, investment, or trading advice.](https://tauric.ai/disclaimer/) > TradingAgents framework is designed for research purposes. Trading performance may vary based on many factors, including the chosen backbone language models, model temperature, trading periods, the quality of data, and other non-deterministic factors. [It is not intended as financial, investment, or trading advice.](https://tauric.ai/disclaimer/)
Our framework decomposes complex trading tasks into specialized roles. This ensures the system achieves a robust, scalable approach to market analysis and decision-making. Our framework decomposes complex trading tasks into specialized roles. This ensures the system achieves a robust, scalable approach to market analysis and decision-making.
### Analyst Team ### Analyst Team
- Fundamentals Analyst: Evaluates company financials and performance metrics, identifying intrinsic values and potential red flags. - Fundamentals Analyst: Evaluates company financials and performance metrics, identifying intrinsic values and potential red flags.
- Sentiment Analyst: Analyzes social media and public sentiment using sentiment scoring algorithms to gauge short-term market mood. - Sentiment Analyst: Analyzes social media and public sentiment using sentiment scoring algorithms to gauge short-term market mood.
- News Analyst: Monitors global news and macroeconomic indicators, interpreting the impact of events on market conditions. - News Analyst: Monitors global news and macroeconomic indicators, interpreting the impact of events on market conditions.
- Technical Analyst: Utilizes technical indicators (like MACD and RSI) to detect trading patterns and forecast price movements. - Technical Analyst: Utilizes technical indicators (like MACD and RSI) to detect trading patterns and forecast price movements.
<p align="center"> <p align="center">
<img src="assets/analyst.png" width="100%" style="display: inline-block; margin: 0 2%;"> <img src="assets/analyst.png" width="100%" style="display: inline-block; margin: 0 2%;">
</p> </p>
### Researcher Team ### Researcher Team
- Comprises both bullish and bearish researchers who critically assess the insights provided by the Analyst Team. Through structured debates, they balance potential gains against inherent risks. - Comprises both bullish and bearish researchers who critically assess the insights provided by the Analyst Team. Through structured debates, they balance potential gains against inherent risks.
<p align="center"> <p align="center">
<img src="assets/researcher.png" width="70%" style="display: inline-block; margin: 0 2%;"> <img src="assets/researcher.png" width="70%" style="display: inline-block; margin: 0 2%;">
</p> </p>
### Trader Agent ### Trader Agent
- Composes reports from the analysts and researchers to make informed trading decisions. It determines the timing and magnitude of trades based on comprehensive market insights. - Composes reports from the analysts and researchers to make informed trading decisions. It determines the timing and magnitude of trades based on comprehensive market insights.
<p align="center"> <p align="center">
<img src="assets/risk.png" width="70%" style="display: inline-block; margin: 0 2%;"> <img src="assets/risk.png" width="70%" style="display: inline-block; margin: 0 2%;">
</p> </p>
### Risk Management and Portfolio Manager ### Risk Management and Portfolio Manager
- Continuously evaluates portfolio risk by assessing market volatility, liquidity, and other risk factors. The risk management team evaluates and adjusts trading strategies, providing assessment reports to the Portfolio Manager for final decision. - Continuously evaluates portfolio risk by assessing market volatility, liquidity, and other risk factors. The risk management team evaluates and adjusts trading strategies, providing assessment reports to the Portfolio Manager for final decision.
- The Portfolio Manager approves/rejects the transaction proposal. If approved, the order will be sent to the simulated exchange and executed. - The Portfolio Manager approves/rejects the transaction proposal. If approved, the order will be sent to the simulated exchange and executed.
<p align="center"> <p align="center">
<img src="assets/trader.png" width="70%" style="display: inline-block; margin: 0 2%;"> <img src="assets/trader.png" width="70%" style="display: inline-block; margin: 0 2%;">
</p> </p>
## Installation and CLI

### Installation

Clone TradingAgents:
```bash
git clone https://github.com/TauricResearch/TradingAgents.git
cd TradingAgents
```

Create a virtual environment in any of your favorite environment managers:
```bash
conda create -n tradingagents python=3.13
conda activate tradingagents
```

Install dependencies:
```bash
pip install -r requirements.txt
```

### Required APIs

You will also need the FinnHub API for financial data. All of our code is implemented with the free tier.
```bash
export FINNHUB_API_KEY=$YOUR_FINNHUB_API_KEY
```

You will need the OpenAI API for all the agents.
```bash
export OPENAI_API_KEY=$YOUR_OPENAI_API_KEY
```
### CLI Usage

You can also try out the CLI directly by running:
```bash
python -m cli.main
```

You will see a screen where you can select your desired tickers, date, LLMs, research depth, etc.

<p align="center">
  <img src="assets/cli/cli_init.png" width="100%" style="display: inline-block; margin: 0 2%;">
</p>

An interface will appear showing results as they load, letting you track the agent's progress as it runs.

<p align="center">
  <img src="assets/cli/cli_news.png" width="100%" style="display: inline-block; margin: 0 2%;">
</p>

<p align="center">
  <img src="assets/cli/cli_transaction.png" width="100%" style="display: inline-block; margin: 0 2%;">
</p>
## TradingAgents Package

### Implementation Details

We built TradingAgents with LangGraph to ensure flexibility and modularity. We utilize `o1-preview` and `gpt-4o` as our deep thinking and fast thinking LLMs for our experiments. However, for testing purposes, we recommend you use `o4-mini` and `gpt-4.1-mini` to save on costs as our framework makes **lots of** API calls.

### Python Usage

To use TradingAgents inside your code, you can import the `tradingagents` module and initialize a `TradingAgentsGraph()` object. The `.propagate()` function will return a decision. You can run `main.py`, here's also a quick example:

```python
from tradingagents.graph.trading_graph import TradingAgentsGraph
from tradingagents.default_config import DEFAULT_CONFIG

ta = TradingAgentsGraph(debug=True, config=DEFAULT_CONFIG.copy())

# forward propagate
_, decision = ta.propagate("NVDA", "2024-05-10")
print(decision)
```

You can also adjust the default configuration to set your own choice of LLMs, debate rounds, etc.

```python
from tradingagents.graph.trading_graph import TradingAgentsGraph
from tradingagents.default_config import DEFAULT_CONFIG

# Create a custom config
config = DEFAULT_CONFIG.copy()
config["deep_think_llm"] = "gpt-4.1-nano"  # Use a different model
config["quick_think_llm"] = "gpt-4.1-nano"  # Use a different model
config["max_debate_rounds"] = 1  # Adjust debate rounds
config["online_tools"] = True  # Use online tools or cached data

# Initialize with custom config
ta = TradingAgentsGraph(debug=True, config=config)

# forward propagate
_, decision = ta.propagate("NVDA", "2024-05-10")
print(decision)
```

> For `online_tools`, we recommend enabling them for experimentation, as they provide access to real-time data. The agents' offline tools rely on cached data from our **Tauric TradingDB**, a curated dataset we use for backtesting. We're currently in the process of refining this dataset, and we plan to release it soon alongside our upcoming projects. Stay tuned!

You can view the full list of configurations in `tradingagents/default_config.py`.
## Contributing

We welcome contributions from the community! Whether it's fixing a bug, improving documentation, or suggesting a new feature, your input helps make this project better. If you are interested in this line of research, please consider joining our open-source financial AI research community [Tauric Research](https://tauric.ai/).

## Citation

Please reference our work if you find *TradingAgents* provides you with some help :)

```
@misc{xiao2025tradingagentsmultiagentsllmfinancial,
      title={TradingAgents: Multi-Agents LLM Financial Trading Framework},
      author={Yijia Xiao and Edward Sun and Di Luo and Wei Wang},
      year={2025},
      eprint={2412.20138},
      archivePrefix={arXiv},
      primaryClass={q-fin.TR},
      url={https://arxiv.org/abs/2412.20138},
}
```

File diff suppressed because it is too large Load Diff

View File

@ -1,10 +1,10 @@
from enum import Enum from enum import Enum
from typing import List, Optional, Dict from typing import List, Optional, Dict
from pydantic import BaseModel from pydantic import BaseModel
class AnalystType(str, Enum):
    """Kinds of analyst agents selectable in the CLI.

    Each member's string value doubles as its configuration identifier.
    """

    MARKET = "market"
    SOCIAL = "social"
    NEWS = "news"
    FUNDAMENTALS = "fundamentals"

View File

@ -1,7 +1,7 @@
______ ___ ___ __ ______ ___ ___ __
/_ __/________ _____/ (_)___ ____ _/ | ____ ____ ____ / /______ /_ __/________ _____/ (_)___ ____ _/ | ____ ____ ____ / /______
/ / / ___/ __ `/ __ / / __ \/ __ `/ /| |/ __ `/ _ \/ __ \/ __/ ___/ / / / ___/ __ `/ __ / / __ \/ __ `/ /| |/ __ `/ _ \/ __ \/ __/ ___/
/ / / / / /_/ / /_/ / / / / / /_/ / ___ / /_/ / __/ / / / /_(__ ) / / / / / /_/ / /_/ / / / / / /_/ / ___ / /_/ / __/ / / / /_(__ )
/_/ /_/ \__,_/\__,_/_/_/ /_/\__, /_/ |_\__, /\___/_/ /_/\__/____/ /_/ /_/ \__,_/\__,_/_/_/ /_/\__, /_/ |_\__, /\___/_/ /_/\__/____/
/____/ /____/ /____/ /____/

View File

@ -1,195 +1,195 @@
import questionary import questionary
from typing import List, Optional, Tuple, Dict from typing import List, Optional, Tuple, Dict
from cli.models import AnalystType from cli.models import AnalystType
# (display label, AnalystType) pairs, in the order presented to the user.
ANALYST_ORDER = [
    ("Market Analyst", AnalystType.MARKET),
    ("Social Media Analyst", AnalystType.SOCIAL),
    ("News Analyst", AnalystType.NEWS),
    ("Fundamentals Analyst", AnalystType.FUNDAMENTALS),
]
def get_ticker() -> str:
    """Prompt the user to enter a ticker symbol.

    Exits the process if the user aborts or provides no input; otherwise
    returns the symbol trimmed and upper-cased.
    """
    prompt_style = questionary.Style(
        [
            ("text", "fg:green"),
            ("highlighted", "noinherit"),
        ]
    )
    ticker = questionary.text(
        "Enter the ticker symbol to analyze:",
        validate=lambda x: len(x.strip()) > 0 or "Please enter a valid ticker symbol.",
        style=prompt_style,
    ).ask()

    if not ticker:
        # NOTE(review): `console` is not defined in this module's visible
        # imports — presumably a rich Console provided elsewhere; confirm.
        console.print("\n[red]No ticker symbol provided. Exiting...[/red]")
        exit(1)

    return ticker.strip().upper()
def get_analysis_date() -> str:
    """Prompt the user to enter a date in YYYY-MM-DD format.

    Exits the process if the user aborts or provides no input; otherwise
    returns the trimmed date string.
    """
    import re
    from datetime import datetime

    def validate_date(date_str: str) -> bool:
        # Shape check first, then a real calendar parse (rejects e.g. 2024-13-40).
        if re.match(r"^\d{4}-\d{2}-\d{2}$", date_str) is None:
            return False
        try:
            datetime.strptime(date_str, "%Y-%m-%d")
        except ValueError:
            return False
        return True

    prompt_style = questionary.Style(
        [
            ("text", "fg:green"),
            ("highlighted", "noinherit"),
        ]
    )
    date = questionary.text(
        "Enter the analysis date (YYYY-MM-DD):",
        validate=lambda x: validate_date(x.strip())
        or "Please enter a valid date in YYYY-MM-DD format.",
        style=prompt_style,
    ).ask()

    if not date:
        # NOTE(review): `console` is not defined in this module's visible
        # imports — presumably a rich Console provided elsewhere; confirm.
        console.print("\n[red]No date provided. Exiting...[/red]")
        exit(1)

    return date.strip()
def select_analysts() -> List[AnalystType]:
    """Select analysts using an interactive checkbox.

    Exits the process if the user aborts or selects nothing; otherwise
    returns the chosen AnalystType values.
    """
    checkbox_style = questionary.Style(
        [
            ("checkbox-selected", "fg:green"),
            ("selected", "fg:green noinherit"),
            ("highlighted", "noinherit"),
            ("pointer", "noinherit"),
        ]
    )
    selected = questionary.checkbox(
        "Select Your [Analysts Team]:",
        choices=[
            questionary.Choice(display, value=value) for display, value in ANALYST_ORDER
        ],
        instruction="\n- Press Space to select/unselect analysts\n- Press 'a' to select/unselect all\n- Press Enter when done",
        validate=lambda x: len(x) > 0 or "You must select at least one analyst.",
        style=checkbox_style,
    ).ask()

    if not selected:
        # NOTE(review): `console` is not defined in this module's visible
        # imports — presumably a rich Console provided elsewhere; confirm.
        console.print("\n[red]No analysts selected. Exiting...[/red]")
        exit(1)

    return selected
def select_research_depth() -> int:
    """Select research depth using an interactive selection.

    Returns the number of debate/strategy-discussion rounds (1, 3, or 5).
    Exits the process if the user aborts.
    """
    # (display label, number of rounds)
    DEPTH_OPTIONS = [
        ("Shallow - Quick research, few debate and strategy discussion rounds", 1),
        ("Medium - Middle ground, moderate debate rounds and strategy discussion", 3),
        ("Deep - Comprehensive research, in depth debate and strategy discussion", 5),
    ]

    select_style = questionary.Style(
        [
            ("selected", "fg:yellow noinherit"),
            ("highlighted", "fg:yellow noinherit"),
            ("pointer", "fg:yellow noinherit"),
        ]
    )
    choice = questionary.select(
        "Select Your [Research Depth]:",
        choices=[
            questionary.Choice(display, value=value) for display, value in DEPTH_OPTIONS
        ],
        instruction="\n- Use arrow keys to navigate\n- Press Enter to select",
        style=select_style,
    ).ask()

    if choice is None:
        # NOTE(review): `console` is not defined in this module's visible
        # imports — presumably a rich Console provided elsewhere; confirm.
        console.print("\n[red]No research depth selected. Exiting...[/red]")
        exit(1)

    return choice
def select_shallow_thinking_agent() -> str:
    """Select the quick-thinking (shallow) LLM engine interactively.

    Returns the chosen model name. Exits the process if the user aborts.
    """
    # (display label, model name)
    SHALLOW_AGENT_OPTIONS = [
        ("GPT-4o-mini - Fast and efficient for quick tasks", "gpt-4o-mini"),
        ("GPT-4.1-nano - Ultra-lightweight model for basic operations", "gpt-4.1-nano"),
        ("GPT-4.1-mini - Compact model with good performance", "gpt-4.1-mini"),
        ("GPT-4o - Standard model with solid capabilities", "gpt-4o"),
    ]

    select_style = questionary.Style(
        [
            ("selected", "fg:magenta noinherit"),
            ("highlighted", "fg:magenta noinherit"),
            ("pointer", "fg:magenta noinherit"),
        ]
    )
    choice = questionary.select(
        "Select Your [Quick-Thinking LLM Engine]:",
        choices=[
            questionary.Choice(display, value=value)
            for display, value in SHALLOW_AGENT_OPTIONS
        ],
        instruction="\n- Use arrow keys to navigate\n- Press Enter to select",
        style=select_style,
    ).ask()

    if choice is None:
        # NOTE(review): `console` is not defined in this module's visible
        # imports — presumably a rich Console provided elsewhere; confirm.
        console.print(
            "\n[red]No shallow thinking llm engine selected. Exiting...[/red]"
        )
        exit(1)

    return choice
def select_deep_thinking_agent() -> str:
    """Select the deep-thinking LLM engine interactively.

    Returns the chosen model name. Exits the process if the user aborts.
    """
    # (display label, model name)
    DEEP_AGENT_OPTIONS = [
        ("GPT-4.1-nano - Ultra-lightweight model for basic operations", "gpt-4.1-nano"),
        ("GPT-4.1-mini - Compact model with good performance", "gpt-4.1-mini"),
        ("GPT-4o - Standard model with solid capabilities", "gpt-4o"),
        ("o4-mini - Specialized reasoning model (compact)", "o4-mini"),
        ("o3-mini - Advanced reasoning model (lightweight)", "o3-mini"),
        ("o3 - Full advanced reasoning model", "o3"),
        ("o1 - Premier reasoning and problem-solving model", "o1"),
    ]

    select_style = questionary.Style(
        [
            ("selected", "fg:magenta noinherit"),
            ("highlighted", "fg:magenta noinherit"),
            ("pointer", "fg:magenta noinherit"),
        ]
    )
    choice = questionary.select(
        "Select Your [Deep-Thinking LLM Engine]:",
        choices=[
            questionary.Choice(display, value=value)
            for display, value in DEEP_AGENT_OPTIONS
        ],
        instruction="\n- Use arrow keys to navigate\n- Press Enter to select",
        style=select_style,
    ).ask()

    if choice is None:
        # NOTE(review): `console` is not defined in this module's visible
        # imports — presumably a rich Console provided elsewhere; confirm.
        console.print("\n[red]No deep thinking llm engine selected. Exiting...[/red]")
        exit(1)

    return choice

38
main.py
View File

@ -1,19 +1,19 @@
from tradingagents.graph.trading_graph import TradingAgentsGraph
from tradingagents.default_config import DEFAULT_CONFIG

# Build a custom configuration on top of the defaults.
config = DEFAULT_CONFIG.copy()
config["deep_think_llm"] = "gpt-4.1-nano"  # cheaper deep-thinking model
config["quick_think_llm"] = "gpt-4.1-nano"  # cheaper quick-thinking model
config["max_debate_rounds"] = 1  # single debate round
config["online_tools"] = True  # real-time data instead of cached data

# Initialize the trading graph with the custom config.
ta = TradingAgentsGraph(debug=True, config=config)

# Forward propagate: produce a trading decision for the ticker/date.
_, decision = ta.propagate("NVDA", "2024-05-10")
print(decision)

# Memorize mistakes and reflect
# ta.reflect_and_remember(1000)  # parameter is the position returns

View File

@ -1,24 +1,24 @@
typing-extensions
langchain-openai
langchain-experimental
pandas
yfinance
praw
feedparser
stockstats
eodhd
langgraph
chromadb
setuptools
backtrader
akshare
tushare
finnhub-python
parsel
requests
tqdm
pytz
redis
chainlit
rich
questionary

View File

@ -1,43 +1,43 @@
""" """
Setup script for the TradingAgents package. Setup script for the TradingAgents package.
""" """
from setuptools import setup, find_packages from setuptools import setup, find_packages
setup( setup(
name="tradingagents", name="tradingagents",
version="0.1.0", version="0.1.0",
description="Multi-Agents LLM Financial Trading Framework", description="Multi-Agents LLM Financial Trading Framework",
author="TradingAgents Team", author="TradingAgents Team",
author_email="yijia.xiao@cs.ucla.edu", author_email="yijia.xiao@cs.ucla.edu",
url="https://github.com/TauricResearch", url="https://github.com/TauricResearch",
packages=find_packages(), packages=find_packages(),
install_requires=[ install_requires=[
"langchain>=0.1.0", "langchain>=0.1.0",
"langchain-openai>=0.0.2", "langchain-openai>=0.0.2",
"langchain-experimental>=0.0.40", "langchain-experimental>=0.0.40",
"langgraph>=0.0.20", "langgraph>=0.0.20",
"numpy>=1.24.0", "numpy>=1.24.0",
"pandas>=2.0.0", "pandas>=2.0.0",
"praw>=7.7.0", "praw>=7.7.0",
"stockstats>=0.5.4", "stockstats>=0.5.4",
"yfinance>=0.2.31", "yfinance>=0.2.31",
"typer>=0.9.0", "typer>=0.9.0",
"rich>=13.0.0", "rich>=13.0.0",
"questionary>=2.0.1", "questionary>=2.0.1",
], ],
python_requires=">=3.10", python_requires=">=3.10",
entry_points={ entry_points={
"console_scripts": [ "console_scripts": [
"tradingagents=cli.main:app", "tradingagents=cli.main:app",
], ],
}, },
classifiers=[ classifiers=[
"Development Status :: 3 - Alpha", "Development Status :: 3 - Alpha",
"Intended Audience :: Financial and Trading Industry", "Intended Audience :: Financial and Trading Industry",
"License :: OSI Approved :: Apache Software License", "License :: OSI Approved :: Apache Software License",
"Programming Language :: Python :: 3", "Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.10",
"Topic :: Office/Business :: Financial :: Investment", "Topic :: Office/Business :: Financial :: Investment",
], ],
) )

View File

@ -1,41 +1,41 @@
# Shared utilities, state types, and memory.
from .utils.agent_utils import Toolkit, create_msg_delete
from .utils.agent_states import AgentState, InvestDebateState, RiskDebateState
from .utils.memory import FinancialSituationMemory

# Analyst agent factories.
from .analysts.fundamentals_analyst import create_fundamentals_analyst
from .analysts.market_analyst import create_market_analyst
from .analysts.news_analyst import create_news_analyst
from .analysts.social_media_analyst import create_social_media_analyst

# Researcher and risk-debate agent factories.
from .researchers.bear_researcher import create_bear_researcher
from .researchers.bull_researcher import create_bull_researcher
from .risk_mgmt.aggresive_debator import create_risky_debator
from .risk_mgmt.conservative_debator import create_safe_debator
from .risk_mgmt.neutral_debator import create_neutral_debator

# Manager and trader agent factories.
from .managers.research_manager import create_research_manager
from .managers.risk_manager import create_risk_manager
from .trader.trader import create_trader

# Public API of the package.
__all__ = [
    "FinancialSituationMemory",
    "Toolkit",
    "AgentState",
    "create_msg_delete",
    "InvestDebateState",
    "RiskDebateState",
    "create_bear_researcher",
    "create_bull_researcher",
    "create_research_manager",
    "create_fundamentals_analyst",
    "create_market_analyst",
    "create_neutral_debator",
    "create_news_analyst",
    "create_risky_debator",
    "create_risk_manager",
    "create_safe_debator",
    "create_social_media_analyst",
    "create_trader",
]

View File

@ -1,59 +1,59 @@
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
import time import time
import json import json
def create_fundamentals_analyst(llm, toolkit):
    """Build a LangGraph node that writes a fundamentals report for a ticker.

    Args:
        llm: Chat model supporting ``bind_tools``.
        toolkit: Tool collection; ``toolkit.config["online_tools"]`` selects
            between the online OpenAI tool and the cached offline tools.

    Returns:
        A node function mapping graph state to updated ``messages`` plus a
        ``fundamentals_report`` string.
    """

    def fundamentals_analyst_node(state):
        current_date = state["trade_date"]
        ticker = state["company_of_interest"]

        if toolkit.config["online_tools"]:
            tools = [toolkit.get_fundamentals_openai]
        else:
            tools = [
                toolkit.get_finnhub_company_insider_sentiment,
                toolkit.get_finnhub_company_insider_transactions,
                toolkit.get_simfin_balance_sheet,
                toolkit.get_simfin_cashflow,
                toolkit.get_simfin_income_stmt,
            ]

        # BUG FIX: a trailing comma previously made this a 1-tuple, so the
        # prompt was formatted with the tuple's repr instead of the string.
        # Also fixed the "Makrdown" typo in the instruction text.
        system_message = (
            "You are a researcher tasked with analyzing fundamental information over the past week about a company. Please write a comprehensive report of the company's fundamental information such as financial documents, company profile, basic company financials, company financial history, insider sentiment and insider transactions to gain a full view of the company's fundamental information to inform traders. Make sure to include as much detail as possible. Do not simply state the trends are mixed, provide detailed and finegrained analysis and insights that may help traders make decisions."
            + " Make sure to append a Markdown table at the end of the report to organize key points in the report, organized and easy to read."
        )

        prompt = ChatPromptTemplate.from_messages(
            [
                (
                    "system",
                    "You are a helpful AI assistant, collaborating with other assistants."
                    " Use the provided tools to progress towards answering the question."
                    " If you are unable to fully answer, that's OK; another assistant with different tools"
                    " will help where you left off. Execute what you can to make progress."
                    " If you or any other assistant has the FINAL TRANSACTION PROPOSAL: **BUY/HOLD/SELL** or deliverable,"
                    " prefix your response with FINAL TRANSACTION PROPOSAL: **BUY/HOLD/SELL** so the team knows to stop."
                    " You have access to the following tools: {tool_names}.\n{system_message}"
                    "For your reference, the current date is {current_date}. The company we want to look at is {ticker}",
                ),
                MessagesPlaceholder(variable_name="messages"),
            ]
        )

        # Fill in the template variables that are known up front.
        prompt = prompt.partial(system_message=system_message)
        prompt = prompt.partial(tool_names=", ".join([tool.name for tool in tools]))
        prompt = prompt.partial(current_date=current_date)
        prompt = prompt.partial(ticker=ticker)

        chain = prompt | llm.bind_tools(tools)
        result = chain.invoke(state["messages"])

        return {
            "messages": [result],
            "fundamentals_report": result.content,
        }

    return fundamentals_analyst_node

View File

@ -1,84 +1,84 @@
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
import time import time
import json import json
def create_market_analyst(llm, toolkit):
    """Build the market/technical-analysis agent node.

    Args:
        llm: Chat model exposing ``bind_tools``; runs the indicator analysis.
        toolkit: Tool provider; ``toolkit.config["online_tools"]`` selects
            between live Yahoo-Finance tools and their offline counterparts.

    Returns:
        A callable ``market_analyst_node(state) -> dict`` that appends the
        model's message to ``state["messages"]`` and stores its text content
        under ``"market_report"``.
    """

    def market_analyst_node(state):
        current_date = state["trade_date"]
        ticker = state["company_of_interest"]

        # Prefer live data sources when the toolkit is configured for them.
        if toolkit.config["online_tools"]:
            tools = [
                toolkit.get_YFin_data_online,
                toolkit.get_stockstats_indicators_report_online,
            ]
        else:
            tools = [
                toolkit.get_YFin_data,
                toolkit.get_stockstats_indicators_report,
            ]

        system_message = (
            """You are a trading assistant tasked with analyzing financial markets. Your role is to select the **most relevant indicators** for a given market condition or trading strategy from the following list. The goal is to choose up to **8 indicators** that provide complementary insights without redundancy. Categories and each category's indicators are:
Moving Averages:
- close_50_sma: 50 SMA: A medium-term trend indicator. Usage: Identify trend direction and serve as dynamic support/resistance. Tips: It lags price; combine with faster indicators for timely signals.
- close_200_sma: 200 SMA: A long-term trend benchmark. Usage: Confirm overall market trend and identify golden/death cross setups. Tips: It reacts slowly; best for strategic trend confirmation rather than frequent trading entries.
- close_10_ema: 10 EMA: A responsive short-term average. Usage: Capture quick shifts in momentum and potential entry points. Tips: Prone to noise in choppy markets; use alongside longer averages for filtering false signals.
MACD Related:
- macd: MACD: Computes momentum via differences of EMAs. Usage: Look for crossovers and divergence as signals of trend changes. Tips: Confirm with other indicators in low-volatility or sideways markets.
- macds: MACD Signal: An EMA smoothing of the MACD line. Usage: Use crossovers with the MACD line to trigger trades. Tips: Should be part of a broader strategy to avoid false positives.
- macdh: MACD Histogram: Shows the gap between the MACD line and its signal. Usage: Visualize momentum strength and spot divergence early. Tips: Can be volatile; complement with additional filters in fast-moving markets.
Momentum Indicators:
- rsi: RSI: Measures momentum to flag overbought/oversold conditions. Usage: Apply 70/30 thresholds and watch for divergence to signal reversals. Tips: In strong trends, RSI may remain extreme; always cross-check with trend analysis.
Volatility Indicators:
- boll: Bollinger Middle: A 20 SMA serving as the basis for Bollinger Bands. Usage: Acts as a dynamic benchmark for price movement. Tips: Combine with the upper and lower bands to effectively spot breakouts or reversals.
- boll_ub: Bollinger Upper Band: Typically 2 standard deviations above the middle line. Usage: Signals potential overbought conditions and breakout zones. Tips: Confirm signals with other tools; prices may ride the band in strong trends.
- boll_lb: Bollinger Lower Band: Typically 2 standard deviations below the middle line. Usage: Indicates potential oversold conditions. Tips: Use additional analysis to avoid false reversal signals.
- atr: ATR: Averages true range to measure volatility. Usage: Set stop-loss levels and adjust position sizes based on current market volatility. Tips: It's a reactive measure, so use it as part of a broader risk management strategy.
Volume-Based Indicators:
- vwma: VWMA: A moving average weighted by volume. Usage: Confirm trends by integrating price action with volume data. Tips: Watch for skewed results from volume spikes; use in combination with other volume analyses.
- Select indicators that provide diverse and complementary information. Avoid redundancy (e.g., do not select both rsi and stochrsi). Also briefly explain why they are suitable for the given market context. When you tool call, please use the exact name of the indicators provided above as they are defined parameters, otherwise your call will fail. Please make sure to call get_YFin_data first to retrieve the CSV that is needed to generate indicators. Write a very detailed and nuanced report of the trends you observe. Do not simply state the trends are mixed, provide detailed and finegrained analysis and insights that may help traders make decisions."""
            + """ Make sure to append a Markdown table at the end of the report to organize key points in the report, organized and easy to read."""
        )

        # Shared multi-agent collaboration preamble; template variables are
        # filled in via .partial() below.
        prompt = ChatPromptTemplate.from_messages(
            [
                (
                    "system",
                    "You are a helpful AI assistant, collaborating with other assistants."
                    " Use the provided tools to progress towards answering the question."
                    " If you are unable to fully answer, that's OK; another assistant with different tools"
                    " will help where you left off. Execute what you can to make progress."
                    " If you or any other assistant has the FINAL TRANSACTION PROPOSAL: **BUY/HOLD/SELL** or deliverable,"
                    " prefix your response with FINAL TRANSACTION PROPOSAL: **BUY/HOLD/SELL** so the team knows to stop."
                    " You have access to the following tools: {tool_names}.\n{system_message}"
                    "For your reference, the current date is {current_date}. The company we want to look at is {ticker}",
                ),
                MessagesPlaceholder(variable_name="messages"),
            ]
        )

        prompt = prompt.partial(system_message=system_message)
        prompt = prompt.partial(tool_names=", ".join([tool.name for tool in tools]))
        prompt = prompt.partial(current_date=current_date)
        prompt = prompt.partial(ticker=ticker)

        chain = prompt | llm.bind_tools(tools)
        result = chain.invoke(state["messages"])

        return {
            "messages": [result],
            "market_report": result.content,
        }

    return market_analyst_node

View File

@ -1,55 +1,55 @@
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
import time import time
import json import json
def create_news_analyst(llm, toolkit):
    """Build the macro/news-analysis agent node.

    Args:
        llm: Chat model exposing ``bind_tools``; runs the news analysis.
        toolkit: Tool provider; ``toolkit.config["online_tools"]`` selects
            between OpenAI/Google news tools and offline finnhub/reddit ones.

    Returns:
        A callable ``news_analyst_node(state) -> dict`` that appends the
        model's message to ``state["messages"]`` and stores its text content
        under ``"news_report"``.
    """

    def news_analyst_node(state):
        current_date = state["trade_date"]
        ticker = state["company_of_interest"]

        # Prefer live news sources when the toolkit is configured for them.
        if toolkit.config["online_tools"]:
            tools = [toolkit.get_global_news_openai, toolkit.get_google_news]
        else:
            tools = [
                toolkit.get_finnhub_news,
                toolkit.get_reddit_news,
                toolkit.get_google_news,
            ]

        # NOTE: fixed typo "Makrdown" -> "Markdown" in the instruction string.
        system_message = (
            "You are a news researcher tasked with analyzing recent news and trends over the past week. Please write a comprehensive report of the current state of the world that is relevant for trading and macroeconomics. Look at news from EODHD, and finnhub to be comprehensive. Do not simply state the trends are mixed, provide detailed and finegrained analysis and insights that may help traders make decisions."
            + """ Make sure to append a Markdown table at the end of the report to organize key points in the report, organized and easy to read."""
        )

        # Shared multi-agent collaboration preamble; template variables are
        # filled in via .partial() below.
        prompt = ChatPromptTemplate.from_messages(
            [
                (
                    "system",
                    "You are a helpful AI assistant, collaborating with other assistants."
                    " Use the provided tools to progress towards answering the question."
                    " If you are unable to fully answer, that's OK; another assistant with different tools"
                    " will help where you left off. Execute what you can to make progress."
                    " If you or any other assistant has the FINAL TRANSACTION PROPOSAL: **BUY/HOLD/SELL** or deliverable,"
                    " prefix your response with FINAL TRANSACTION PROPOSAL: **BUY/HOLD/SELL** so the team knows to stop."
                    " You have access to the following tools: {tool_names}.\n{system_message}"
                    "For your reference, the current date is {current_date}. We are looking at the company {ticker}",
                ),
                MessagesPlaceholder(variable_name="messages"),
            ]
        )

        prompt = prompt.partial(system_message=system_message)
        prompt = prompt.partial(tool_names=", ".join([tool.name for tool in tools]))
        prompt = prompt.partial(current_date=current_date)
        prompt = prompt.partial(ticker=ticker)

        chain = prompt | llm.bind_tools(tools)
        result = chain.invoke(state["messages"])

        return {
            "messages": [result],
            "news_report": result.content,
        }

    return news_analyst_node

View File

@ -1,55 +1,55 @@
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
import time import time
import json import json
def create_social_media_analyst(llm, toolkit):
    """Build the social-media / sentiment analysis agent node.

    Args:
        llm: Chat model exposing ``bind_tools``; runs the sentiment analysis.
        toolkit: Tool provider; ``toolkit.config["online_tools"]`` selects
            between the OpenAI stock-news tool and the offline reddit tool.

    Returns:
        A callable ``social_media_analyst_node(state) -> dict`` that appends
        the model's message to ``state["messages"]`` and stores its text
        content under ``"sentiment_report"``.
    """

    def social_media_analyst_node(state):
        current_date = state["trade_date"]
        ticker = state["company_of_interest"]

        # Prefer live sources when the toolkit is configured for them.
        if toolkit.config["online_tools"]:
            tools = [toolkit.get_stock_news_openai]
        else:
            tools = [
                toolkit.get_reddit_stock_info,
            ]

        # BUG FIX: the original had a trailing comma inside the parentheses,
        # which made system_message a one-element *tuple* instead of a string
        # (its repr would then leak into the rendered prompt). Also fixed the
        # "Makrdown" -> "Markdown" typo.
        system_message = (
            "You are a social media and company specific news researcher/analyst tasked with analyzing social media posts, recent company news, and public sentiment for a specific company over the past week. You will be given a company's name your objective is to write a comprehensive long report detailing your analysis, insights, and implications for traders and investors on this company's current state after looking at social media and what people are saying about that company, analyzing sentiment data of what people feel each day about the company, and looking at recent company news. Try to look at all sources possible from social media to sentiment to news. Do not simply state the trends are mixed, provide detailed and finegrained analysis and insights that may help traders make decisions."
            + """ Make sure to append a Markdown table at the end of the report to organize key points in the report, organized and easy to read."""
        )

        # Shared multi-agent collaboration preamble; template variables are
        # filled in via .partial() below.
        prompt = ChatPromptTemplate.from_messages(
            [
                (
                    "system",
                    "You are a helpful AI assistant, collaborating with other assistants."
                    " Use the provided tools to progress towards answering the question."
                    " If you are unable to fully answer, that's OK; another assistant with different tools"
                    " will help where you left off. Execute what you can to make progress."
                    " If you or any other assistant has the FINAL TRANSACTION PROPOSAL: **BUY/HOLD/SELL** or deliverable,"
                    " prefix your response with FINAL TRANSACTION PROPOSAL: **BUY/HOLD/SELL** so the team knows to stop."
                    " You have access to the following tools: {tool_names}.\n{system_message}"
                    "For your reference, the current date is {current_date}. The current company we want to analyze is {ticker}",
                ),
                MessagesPlaceholder(variable_name="messages"),
            ]
        )

        prompt = prompt.partial(system_message=system_message)
        prompt = prompt.partial(tool_names=", ".join([tool.name for tool in tools]))
        prompt = prompt.partial(current_date=current_date)
        prompt = prompt.partial(ticker=ticker)

        chain = prompt | llm.bind_tools(tools)
        result = chain.invoke(state["messages"])

        return {
            "messages": [result],
            "sentiment_report": result.content,
        }

    return social_media_analyst_node

View File

@ -1,55 +1,55 @@
import time import time
import json import json
def create_research_manager(llm, memory):
    """Build the research-manager (bull/bear debate judge) node.

    Args:
        llm: Chat model exposing ``invoke``; renders the final judgement.
        memory: Store exposing ``get_memories(situation, n_matches)`` that
            returns past reflections as dicts with a ``"recommendation"`` key.

    Returns:
        A callable ``research_manager_node(state) -> dict`` producing an
        updated ``"investment_debate_state"`` and an ``"investment_plan"``.
    """

    def research_manager_node(state) -> dict:
        history = state["investment_debate_state"].get("history", "")
        market_research_report = state["market_report"]
        sentiment_report = state["sentiment_report"]
        news_report = state["news_report"]
        fundamentals_report = state["fundamentals_report"]
        investment_debate_state = state["investment_debate_state"]

        # Recall reflections on similar past situations to ground the decision.
        curr_situation = f"{market_research_report}\n\n{sentiment_report}\n\n{news_report}\n\n{fundamentals_report}"
        past_memories = memory.get_memories(curr_situation, n_matches=2)
        past_memory_str = "".join(
            rec["recommendation"] + "\n\n" for rec in past_memories
        )

        prompt = f"""As the portfolio manager and debate facilitator, your role is to critically evaluate this round of debate and make a definitive decision: align with the bear analyst, the bull analyst, or choose Hold only if it is strongly justified based on the arguments presented.
Summarize the key points from both sides concisely, focusing on the most compelling evidence or reasoning. Your recommendation—Buy, Sell, or Hold—must be clear and actionable. Avoid defaulting to Hold simply because both sides have valid points; commit to a stance grounded in the debate's strongest arguments.
Additionally, develop a detailed investment plan for the trader. This should include:
Your Recommendation: A decisive stance supported by the most convincing arguments.
Rationale: An explanation of why these arguments lead to your conclusion.
Strategic Actions: Concrete steps for implementing the recommendation.
Take into account your past mistakes on similar situations. Use these insights to refine your decision-making and ensure you are learning and improving. Present your analysis conversationally, as if speaking naturally, without special formatting.
Here are your past reflections on mistakes:
\"{past_memory_str}\"
Here is the debate:
Debate History:
{history}"""
        response = llm.invoke(prompt)

        # Carry the debate transcript forward; the judge's decision doubles as
        # both the debate verdict and the trader-facing investment plan.
        new_investment_debate_state = {
            "judge_decision": response.content,
            "history": investment_debate_state.get("history", ""),
            "bear_history": investment_debate_state.get("bear_history", ""),
            "bull_history": investment_debate_state.get("bull_history", ""),
            "current_response": response.content,
            "count": investment_debate_state["count"],
        }

        return {
            "investment_debate_state": new_investment_debate_state,
            "investment_plan": response.content,
        }

    return research_manager_node

View File

@ -1,66 +1,66 @@
import time import time
import json import json
def create_risk_manager(llm, memory):
    """Build the risk-manager (risk-debate judge) node.

    Args:
        llm: Chat model exposing ``invoke``; renders the final judgement.
        memory: Store exposing ``get_memories(situation, n_matches)`` that
            returns past reflections as dicts with a ``"recommendation"`` key.

    Returns:
        A callable ``risk_manager_node(state) -> dict`` producing an updated
        ``"risk_debate_state"`` and the ``"final_trade_decision"``.
    """

    def risk_manager_node(state) -> dict:
        company_name = state["company_of_interest"]
        history = state["risk_debate_state"]["history"]
        risk_debate_state = state["risk_debate_state"]
        market_research_report = state["market_report"]
        news_report = state["news_report"]
        # BUG FIX: this previously read state["news_report"], duplicating the
        # news report and dropping fundamentals from the memory-retrieval query.
        fundamentals_report = state["fundamentals_report"]
        sentiment_report = state["sentiment_report"]
        trader_plan = state["investment_plan"]

        # Recall reflections on similar past situations to ground the decision.
        curr_situation = f"{market_research_report}\n\n{sentiment_report}\n\n{news_report}\n\n{fundamentals_report}"
        past_memories = memory.get_memories(curr_situation, n_matches=2)
        past_memory_str = ""
        for i, rec in enumerate(past_memories, 1):
            past_memory_str += rec["recommendation"] + "\n\n"

        prompt = f"""As the Risk Management Judge and Debate Facilitator, your goal is to evaluate the debate between three risk analysts—Risky, Neutral, and Safe/Conservative—and determine the best course of action for the trader. Your decision must result in a clear recommendation: Buy, Sell, or Hold. Choose Hold only if strongly justified by specific arguments, not as a fallback when all sides seem valid. Strive for clarity and decisiveness.
Guidelines for Decision-Making:
1. **Summarize Key Arguments**: Extract the strongest points from each analyst, focusing on relevance to the context.
2. **Provide Rationale**: Support your recommendation with direct quotes and counterarguments from the debate.
3. **Refine the Trader's Plan**: Start with the trader's original plan, **{trader_plan}**, and adjust it based on the analysts' insights.
4. **Learn from Past Mistakes**: Use lessons from **{past_memory_str}** to address prior misjudgments and improve the decision you are making now to make sure you don't make a wrong BUY/SELL/HOLD call that loses money.
Deliverables:
- A clear and actionable recommendation: Buy, Sell, or Hold.
- Detailed reasoning anchored in the debate and past reflections.
---
**Analysts Debate History:**
{history}
---
Focus on actionable insights and continuous improvement. Build on past lessons, critically evaluate all perspectives, and ensure each decision advances better outcomes."""
        response = llm.invoke(prompt)

        # Carry the three-analyst debate transcript forward; the judge's
        # verdict becomes the final trade decision.
        new_risk_debate_state = {
            "judge_decision": response.content,
            "history": risk_debate_state["history"],
            "risky_history": risk_debate_state["risky_history"],
            "safe_history": risk_debate_state["safe_history"],
            "neutral_history": risk_debate_state["neutral_history"],
            "latest_speaker": "Judge",
            "current_risky_response": risk_debate_state["current_risky_response"],
            "current_safe_response": risk_debate_state["current_safe_response"],
            "current_neutral_response": risk_debate_state["current_neutral_response"],
            "count": risk_debate_state["count"],
        }

        return {
            "risk_debate_state": new_risk_debate_state,
            "final_trade_decision": response.content,
        }

    return risk_manager_node

View File

@ -1,61 +1,61 @@
from langchain_core.messages import AIMessage from langchain_core.messages import AIMessage
import time import time
import json import json
def create_bear_researcher(llm, memory):
    """Build a debate node that argues the bear (against-investing) case.

    Args:
        llm: Chat model exposing ``invoke(prompt)`` and returning a message
            object with a ``.content`` attribute.
        memory: Store exposing ``get_memories(situation, n_matches)`` that
            returns records containing a ``"recommendation"`` key.

    Returns:
        ``bear_node(state) -> dict`` which appends one bear argument to
        ``state["investment_debate_state"]`` and increments its turn count.
    """

    def bear_node(state) -> dict:
        investment_debate_state = state["investment_debate_state"]
        history = investment_debate_state.get("history", "")
        bear_history = investment_debate_state.get("bear_history", "")
        current_response = investment_debate_state.get("current_response", "")

        market_research_report = state["market_report"]
        sentiment_report = state["sentiment_report"]
        news_report = state["news_report"]
        fundamentals_report = state["fundamentals_report"]

        # Retrieve lessons from similar past situations to ground the argument.
        curr_situation = f"{market_research_report}\n\n{sentiment_report}\n\n{news_report}\n\n{fundamentals_report}"
        past_memories = memory.get_memories(curr_situation, n_matches=2)
        # join() replaces the manual += loop whose enumerate index was unused.
        past_memory_str = "".join(rec["recommendation"] + "\n\n" for rec in past_memories)

        prompt = f"""You are a Bear Analyst making the case against investing in the stock. Your goal is to present a well-reasoned argument emphasizing risks, challenges, and negative indicators. Leverage the provided research and data to highlight potential downsides and counter bullish arguments effectively.
Key points to focus on:
- Risks and Challenges: Highlight factors like market saturation, financial instability, or macroeconomic threats that could hinder the stock's performance.
- Competitive Weaknesses: Emphasize vulnerabilities such as weaker market positioning, declining innovation, or threats from competitors.
- Negative Indicators: Use evidence from financial data, market trends, or recent adverse news to support your position.
- Bull Counterpoints: Critically analyze the bull argument with specific data and sound reasoning, exposing weaknesses or over-optimistic assumptions.
- Engagement: Present your argument in a conversational style, directly engaging with the bull analyst's points and debating effectively rather than simply listing facts.
Resources available:
Market research report: {market_research_report}
Social media sentiment report: {sentiment_report}
Latest world affairs news: {news_report}
Company fundamentals report: {fundamentals_report}
Conversation history of the debate: {history}
Last bull argument: {current_response}
Reflections from similar situations and lessons learned: {past_memory_str}
Use this information to deliver a compelling bear argument, refute the bull's claims, and engage in a dynamic debate that demonstrates the risks and weaknesses of investing in the stock. You must also address reflections and learn from lessons and mistakes you made in the past.
"""

        response = llm.invoke(prompt)
        argument = f"Bear Analyst: {response.content}"

        # Append the new turn to the shared and bear-specific histories;
        # bull history is passed through unchanged.
        new_investment_debate_state = {
            "history": history + "\n" + argument,
            "bear_history": bear_history + "\n" + argument,
            "bull_history": investment_debate_state.get("bull_history", ""),
            "current_response": argument,
            "count": investment_debate_state["count"] + 1,
        }

        return {"investment_debate_state": new_investment_debate_state}

    return bear_node

View File

@ -1,59 +1,59 @@
from langchain_core.messages import AIMessage from langchain_core.messages import AIMessage
import time import time
import json import json
def create_bull_researcher(llm, memory):
    """Build a debate node that argues the bull (pro-investing) case.

    Args:
        llm: Chat model exposing ``invoke(prompt)`` and returning a message
            object with a ``.content`` attribute.
        memory: Store exposing ``get_memories(situation, n_matches)`` that
            returns records containing a ``"recommendation"`` key.

    Returns:
        ``bull_node(state) -> dict`` which appends one bull argument to
        ``state["investment_debate_state"]`` and increments its turn count.
    """

    def bull_node(state) -> dict:
        investment_debate_state = state["investment_debate_state"]
        history = investment_debate_state.get("history", "")
        bull_history = investment_debate_state.get("bull_history", "")
        current_response = investment_debate_state.get("current_response", "")

        market_research_report = state["market_report"]
        sentiment_report = state["sentiment_report"]
        news_report = state["news_report"]
        fundamentals_report = state["fundamentals_report"]

        # Retrieve lessons from similar past situations to ground the argument.
        curr_situation = f"{market_research_report}\n\n{sentiment_report}\n\n{news_report}\n\n{fundamentals_report}"
        past_memories = memory.get_memories(curr_situation, n_matches=2)
        # join() replaces the manual += loop whose enumerate index was unused.
        past_memory_str = "".join(rec["recommendation"] + "\n\n" for rec in past_memories)

        prompt = f"""You are a Bull Analyst advocating for investing in the stock. Your task is to build a strong, evidence-based case emphasizing growth potential, competitive advantages, and positive market indicators. Leverage the provided research and data to address concerns and counter bearish arguments effectively.
Key points to focus on:
- Growth Potential: Highlight the company's market opportunities, revenue projections, and scalability.
- Competitive Advantages: Emphasize factors like unique products, strong branding, or dominant market positioning.
- Positive Indicators: Use financial health, industry trends, and recent positive news as evidence.
- Bear Counterpoints: Critically analyze the bear argument with specific data and sound reasoning, addressing concerns thoroughly and showing why the bull perspective holds stronger merit.
- Engagement: Present your argument in a conversational style, engaging directly with the bear analyst's points and debating effectively rather than just listing data.
Resources available:
Market research report: {market_research_report}
Social media sentiment report: {sentiment_report}
Latest world affairs news: {news_report}
Company fundamentals report: {fundamentals_report}
Conversation history of the debate: {history}
Last bear argument: {current_response}
Reflections from similar situations and lessons learned: {past_memory_str}
Use this information to deliver a compelling bull argument, refute the bear's concerns, and engage in a dynamic debate that demonstrates the strengths of the bull position. You must also address reflections and learn from lessons and mistakes you made in the past.
"""

        response = llm.invoke(prompt)
        argument = f"Bull Analyst: {response.content}"

        # Append the new turn to the shared and bull-specific histories;
        # bear history is passed through unchanged.
        new_investment_debate_state = {
            "history": history + "\n" + argument,
            "bull_history": bull_history + "\n" + argument,
            "bear_history": investment_debate_state.get("bear_history", ""),
            "current_response": argument,
            "count": investment_debate_state["count"] + 1,
        }

        return {"investment_debate_state": new_investment_debate_state}

    return bull_node

View File

@ -1,55 +1,55 @@
import time import time
import json import json
def create_risky_debator(llm):
    """Build a risk-debate node that champions the aggressive (high-risk) view.

    Args:
        llm: Chat model exposing ``invoke(prompt)`` and returning a message
            object with a ``.content`` attribute.

    Returns:
        ``risky_node(state) -> dict`` which appends one risky-analyst turn to
        ``state["risk_debate_state"]``, marks "Risky" as the latest speaker,
        and increments the turn count.
    """

    def risky_node(state) -> dict:
        risk_debate_state = state["risk_debate_state"]
        history = risk_debate_state.get("history", "")
        risky_history = risk_debate_state.get("risky_history", "")

        # Latest counterarguments from the other two debators (may be empty
        # on the first round).
        current_safe_response = risk_debate_state.get("current_safe_response", "")
        current_neutral_response = risk_debate_state.get("current_neutral_response", "")

        market_research_report = state["market_report"]
        sentiment_report = state["sentiment_report"]
        news_report = state["news_report"]
        fundamentals_report = state["fundamentals_report"]
        trader_decision = state["trader_investment_plan"]

        # Typo fixed in prompt: "halluncinate" -> "hallucinate".
        prompt = f"""As the Risky Risk Analyst, your role is to actively champion high-reward, high-risk opportunities, emphasizing bold strategies and competitive advantages. When evaluating the trader's decision or plan, focus intently on the potential upside, growth potential, and innovative benefits—even when these come with elevated risk. Use the provided market data and sentiment analysis to strengthen your arguments and challenge the opposing views. Specifically, respond directly to each point made by the conservative and neutral analysts, countering with data-driven rebuttals and persuasive reasoning. Highlight where their caution might miss critical opportunities or where their assumptions may be overly conservative. Here is the trader's decision:
{trader_decision}
Your task is to create a compelling case for the trader's decision by questioning and critiquing the conservative and neutral stances to demonstrate why your high-reward perspective offers the best path forward. Incorporate insights from the following sources into your arguments:
Market Research Report: {market_research_report}
Social Media Sentiment Report: {sentiment_report}
Latest World Affairs Report: {news_report}
Company Fundamentals Report: {fundamentals_report}
Here is the current conversation history: {history} Here are the last arguments from the conservative analyst: {current_safe_response} Here are the last arguments from the neutral analyst: {current_neutral_response}. If there are no responses from the other viewpoints, do not hallucinate and just present your point.
Engage actively by addressing any specific concerns raised, refuting the weaknesses in their logic, and asserting the benefits of risk-taking to outpace market norms. Maintain a focus on debating and persuading, not just presenting data. Challenge each counterpoint to underscore why a high-risk approach is optimal. Output conversationally as if you are speaking without any special formatting."""

        response = llm.invoke(prompt)
        argument = f"Risky Analyst: {response.content}"

        # Append the new turn; all other debators' histories/responses are
        # passed through unchanged.
        new_risk_debate_state = {
            "history": history + "\n" + argument,
            "risky_history": risky_history + "\n" + argument,
            "safe_history": risk_debate_state.get("safe_history", ""),
            "neutral_history": risk_debate_state.get("neutral_history", ""),
            "latest_speaker": "Risky",
            "current_risky_response": argument,
            "current_safe_response": risk_debate_state.get("current_safe_response", ""),
            "current_neutral_response": risk_debate_state.get(
                "current_neutral_response", ""
            ),
            "count": risk_debate_state["count"] + 1,
        }

        return {"risk_debate_state": new_risk_debate_state}

    return risky_node

View File

@ -1,58 +1,58 @@
from langchain_core.messages import AIMessage from langchain_core.messages import AIMessage
import time import time
import json import json
def create_safe_debator(llm):
    """Build a risk-debate node that argues the conservative (low-risk) view.

    Args:
        llm: Chat model exposing ``invoke(prompt)`` and returning a message
            object with a ``.content`` attribute.

    Returns:
        ``safe_node(state) -> dict`` which appends one safe-analyst turn to
        ``state["risk_debate_state"]``, marks "Safe" as the latest speaker,
        and increments the turn count.
    """

    def safe_node(state) -> dict:
        risk_debate_state = state["risk_debate_state"]
        history = risk_debate_state.get("history", "")
        safe_history = risk_debate_state.get("safe_history", "")

        # Latest counterarguments from the other two debators (may be empty
        # on the first round).
        current_risky_response = risk_debate_state.get("current_risky_response", "")
        current_neutral_response = risk_debate_state.get("current_neutral_response", "")

        market_research_report = state["market_report"]
        sentiment_report = state["sentiment_report"]
        news_report = state["news_report"]
        fundamentals_report = state["fundamentals_report"]
        trader_decision = state["trader_investment_plan"]

        # Typo fixed in prompt: "halluncinate" -> "hallucinate".
        prompt = f"""As the Safe/Conservative Risk Analyst, your primary objective is to protect assets, minimize volatility, and ensure steady, reliable growth. You prioritize stability, security, and risk mitigation, carefully assessing potential losses, economic downturns, and market volatility. When evaluating the trader's decision or plan, critically examine high-risk elements, pointing out where the decision may expose the firm to undue risk and where more cautious alternatives could secure long-term gains. Here is the trader's decision:
{trader_decision}
Your task is to actively counter the arguments of the Risky and Neutral Analysts, highlighting where their views may overlook potential threats or fail to prioritize sustainability. Respond directly to their points, drawing from the following data sources to build a convincing case for a low-risk approach adjustment to the trader's decision:
Market Research Report: {market_research_report}
Social Media Sentiment Report: {sentiment_report}
Latest World Affairs Report: {news_report}
Company Fundamentals Report: {fundamentals_report}
Here is the current conversation history: {history} Here is the last response from the risky analyst: {current_risky_response} Here is the last response from the neutral analyst: {current_neutral_response}. If there are no responses from the other viewpoints, do not hallucinate and just present your point.
Engage by questioning their optimism and emphasizing the potential downsides they may have overlooked. Address each of their counterpoints to showcase why a conservative stance is ultimately the safest path for the firm's assets. Focus on debating and critiquing their arguments to demonstrate the strength of a low-risk strategy over their approaches. Output conversationally as if you are speaking without any special formatting."""

        response = llm.invoke(prompt)
        argument = f"Safe Analyst: {response.content}"

        # Append the new turn; all other debators' histories/responses are
        # passed through unchanged.
        new_risk_debate_state = {
            "history": history + "\n" + argument,
            "risky_history": risk_debate_state.get("risky_history", ""),
            "safe_history": safe_history + "\n" + argument,
            "neutral_history": risk_debate_state.get("neutral_history", ""),
            "latest_speaker": "Safe",
            "current_risky_response": risk_debate_state.get(
                "current_risky_response", ""
            ),
            "current_safe_response": argument,
            "current_neutral_response": risk_debate_state.get(
                "current_neutral_response", ""
            ),
            "count": risk_debate_state["count"] + 1,
        }

        return {"risk_debate_state": new_risk_debate_state}

    return safe_node

View File

@ -1,55 +1,55 @@
import time import time
import json import json
def create_neutral_debator(llm):
    """Build a risk-debate node that argues the balanced (moderate-risk) view.

    Args:
        llm: Chat model exposing ``invoke(prompt)`` and returning a message
            object with a ``.content`` attribute.

    Returns:
        ``neutral_node(state) -> dict`` which appends one neutral-analyst turn
        to ``state["risk_debate_state"]``, marks "Neutral" as the latest
        speaker, and increments the turn count.
    """

    def neutral_node(state) -> dict:
        risk_debate_state = state["risk_debate_state"]
        history = risk_debate_state.get("history", "")
        neutral_history = risk_debate_state.get("neutral_history", "")

        # Latest counterarguments from the other two debators (may be empty
        # on the first round).
        current_risky_response = risk_debate_state.get("current_risky_response", "")
        current_safe_response = risk_debate_state.get("current_safe_response", "")

        market_research_report = state["market_report"]
        sentiment_report = state["sentiment_report"]
        news_report = state["news_report"]
        fundamentals_report = state["fundamentals_report"]
        trader_decision = state["trader_investment_plan"]

        # Typo fixed in prompt: "halluncinate" -> "hallucinate".
        prompt = f"""As the Neutral Risk Analyst, your role is to provide a balanced perspective, weighing both the potential benefits and risks of the trader's decision or plan. You prioritize a well-rounded approach, evaluating the upsides and downsides while factoring in broader market trends, potential economic shifts, and diversification strategies.Here is the trader's decision:
{trader_decision}
Your task is to challenge both the Risky and Safe Analysts, pointing out where each perspective may be overly optimistic or overly cautious. Use insights from the following data sources to support a moderate, sustainable strategy to adjust the trader's decision:
Market Research Report: {market_research_report}
Social Media Sentiment Report: {sentiment_report}
Latest World Affairs Report: {news_report}
Company Fundamentals Report: {fundamentals_report}
Here is the current conversation history: {history} Here is the last response from the risky analyst: {current_risky_response} Here is the last response from the safe analyst: {current_safe_response}. If there are no responses from the other viewpoints, do not hallucinate and just present your point.
Engage actively by analyzing both sides critically, addressing weaknesses in the risky and conservative arguments to advocate for a more balanced approach. Challenge each of their points to illustrate why a moderate risk strategy might offer the best of both worlds, providing growth potential while safeguarding against extreme volatility. Focus on debating rather than simply presenting data, aiming to show that a balanced view can lead to the most reliable outcomes. Output conversationally as if you are speaking without any special formatting."""

        response = llm.invoke(prompt)
        argument = f"Neutral Analyst: {response.content}"

        # Append the new turn; all other debators' histories/responses are
        # passed through unchanged.
        new_risk_debate_state = {
            "history": history + "\n" + argument,
            "risky_history": risk_debate_state.get("risky_history", ""),
            "safe_history": risk_debate_state.get("safe_history", ""),
            "neutral_history": neutral_history + "\n" + argument,
            "latest_speaker": "Neutral",
            "current_risky_response": risk_debate_state.get(
                "current_risky_response", ""
            ),
            "current_safe_response": risk_debate_state.get("current_safe_response", ""),
            "current_neutral_response": argument,
            "count": risk_debate_state["count"] + 1,
        }

        return {"risk_debate_state": new_risk_debate_state}

    return neutral_node

View File

@ -1,43 +1,43 @@
import functools import functools
import time import time
import json import json
def create_trader(llm, memory):
    """Build the trader agent node for the agent graph.

    Args:
        llm: Chat model exposing ``invoke(messages)`` and returning an
            object with a ``.content`` attribute.
        memory: Situation memory exposing ``get_memories(situation,
            n_matches)``; each record carries a ``"recommendation"`` key.

    Returns:
        ``trader_node`` with ``name`` pre-bound to ``"Trader"``.
    """

    def trader_node(state, name):
        company_name = state["company_of_interest"]
        investment_plan = state["investment_plan"]
        market_research_report = state["market_report"]
        sentiment_report = state["sentiment_report"]
        news_report = state["news_report"]
        fundamentals_report = state["fundamentals_report"]

        # Concatenate all analyst reports into one situation description
        # used to retrieve similar past situations from memory.
        curr_situation = f"{market_research_report}\n\n{sentiment_report}\n\n{news_report}\n\n{fundamentals_report}"
        past_memories = memory.get_memories(curr_situation, n_matches=2)

        # join() instead of repeated += (linear, idiomatic); the unused
        # enumerate index from the original loop is dropped.
        past_memory_str = "".join(
            rec["recommendation"] + "\n\n" for rec in past_memories
        )

        context = {
            "role": "user",
            "content": f"Based on a comprehensive analysis by a team of analysts, here is an investment plan tailored for {company_name}. This plan incorporates insights from current technical market trends, macroeconomic indicators, and social media sentiment. Use this plan as a foundation for evaluating your next trading decision.\n\nProposed Investment Plan: {investment_plan}\n\nLeverage these insights to make an informed and strategic decision.",
        }

        messages = [
            {
                "role": "system",
                # Typo fixes vs. original prompt: "situatiosn" -> "situations",
                # "Here is some reflections" -> "Here are some reflections".
                "content": f"""You are a trading agent analyzing market data to make investment decisions. Based on your analysis, provide a specific recommendation to buy, sell, or hold. End with a firm decision and always conclude your response with 'FINAL TRANSACTION PROPOSAL: **BUY/HOLD/SELL**' to confirm your recommendation. Do not forget to utilize lessons from past decisions to learn from your mistakes. Here are some reflections from similar situations you traded in and the lessons learned: {past_memory_str}""",
            },
            context,
        ]

        result = llm.invoke(messages)

        return {
            "messages": [result],
            "trader_investment_plan": result.content,
            "sender": name,
        }

    return functools.partial(trader_node, name="Trader")

View File

@ -1,76 +1,76 @@
from typing import Annotated, Sequence from typing import Annotated, Sequence
from datetime import date, timedelta, datetime from datetime import date, timedelta, datetime
from typing_extensions import TypedDict, Optional from typing_extensions import TypedDict, Optional
from langchain_openai import ChatOpenAI from langchain_openai import ChatOpenAI
from tradingagents.agents import * from tradingagents.agents import *
from langgraph.prebuilt import ToolNode from langgraph.prebuilt import ToolNode
from langgraph.graph import END, StateGraph, START, MessagesState from langgraph.graph import END, StateGraph, START, MessagesState
# Researcher team state
class InvestDebateState(TypedDict):
    """State of the bull-vs-bear researcher debate."""

    bull_history: Annotated[
        str, "Bullish Conversation history"
    ]  # Bullish conversation history
    bear_history: Annotated[
        str, "Bearish Conversation history"
    ]  # Bearish conversation history (original comment was mislabeled "Bullish")
    history: Annotated[str, "Conversation history"]  # Full conversation history
    current_response: Annotated[str, "Latest response"]  # Last response
    judge_decision: Annotated[str, "Final judge decision"]  # Judge's verdict
    count: Annotated[int, "Length of the current conversation"]  # Conversation length
# Risk management team state
class RiskDebateState(TypedDict):
    """State of the risky/safe/neutral analyst risk debate."""

    risky_history: Annotated[
        str, "Risky Agent's Conversation history"
    ]  # Risky analyst's conversation history
    safe_history: Annotated[
        str, "Safe Agent's Conversation history"
    ]  # Safe analyst's conversation history
    neutral_history: Annotated[
        str, "Neutral Agent's Conversation history"
    ]  # Neutral analyst's conversation history
    history: Annotated[str, "Conversation history"]  # Full conversation history
    latest_speaker: Annotated[str, "Analyst that spoke last"]
    current_risky_response: Annotated[
        str, "Latest response by the risky analyst"
    ]  # Last risky response
    current_safe_response: Annotated[
        str, "Latest response by the safe analyst"
    ]  # Last safe response
    current_neutral_response: Annotated[
        str, "Latest response by the neutral analyst"
    ]  # Last neutral response
    judge_decision: Annotated[str, "Judge's decision"]
    count: Annotated[int, "Length of the current conversation"]  # Conversation length
class AgentState(MessagesState):
    """Global graph state threaded through every agent node.

    Extends the LangGraph ``MessagesState`` (message list) with the trade
    context, analyst reports, and the two debate sub-states.
    """

    company_of_interest: Annotated[str, "Company that we are interested in trading"]
    trade_date: Annotated[str, "What date we are trading at"]

    sender: Annotated[str, "Agent that sent this message"]

    # research step
    market_report: Annotated[str, "Report from the Market Analyst"]
    sentiment_report: Annotated[str, "Report from the Social Media Analyst"]
    news_report: Annotated[
        str, "Report from the News Researcher of current world affairs"
    ]
    fundamentals_report: Annotated[str, "Report from the Fundamentals Researcher"]

    # researcher team discussion step
    investment_debate_state: Annotated[
        InvestDebateState, "Current state of the debate on if to invest or not"
    ]
    investment_plan: Annotated[str, "Plan generated by the Analyst"]
    trader_investment_plan: Annotated[str, "Plan generated by the Trader"]

    # risk management team discussion step
    risk_debate_state: Annotated[
        RiskDebateState, "Current state of the debate on evaluating risk"
    ]
    final_trade_decision: Annotated[str, "Final decision made by the Risk Analysts"]

View File

@ -1,411 +1,411 @@
from langchain_core.messages import BaseMessage, HumanMessage, ToolMessage, AIMessage from langchain_core.messages import BaseMessage, HumanMessage, ToolMessage, AIMessage
from typing import List from typing import List
from typing import Annotated from typing import Annotated
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.messages import RemoveMessage from langchain_core.messages import RemoveMessage
from langchain_core.tools import tool from langchain_core.tools import tool
from datetime import date, timedelta, datetime from datetime import date, timedelta, datetime
import functools import functools
import pandas as pd import pandas as pd
import os import os
from dateutil.relativedelta import relativedelta from dateutil.relativedelta import relativedelta
from langchain_openai import ChatOpenAI from langchain_openai import ChatOpenAI
import tradingagents.dataflows.interface as interface import tradingagents.dataflows.interface as interface
from tradingagents.default_config import DEFAULT_CONFIG from tradingagents.default_config import DEFAULT_CONFIG
def create_msg_delete():
    """Factory for a node that wipes the accumulated message history.

    The returned callable emits a ``RemoveMessage`` directive for every
    message currently in the state, so the history is cleared once a
    pipeline stage has finished (preventing unbounded growth).
    """

    def delete_messages(state):
        """To prevent message history from overflowing, regularly clear message history after a stage of the pipeline is done"""
        removals = []
        for msg in state["messages"]:
            removals.append(RemoveMessage(id=msg.id))
        return {"messages": removals}

    return delete_messages
class Toolkit:
    """Collection of LangChain tools exposing market, news, and fundamentals data.

    Every tool is a thin static wrapper around a function in
    ``tradingagents.dataflows.interface``. Configuration is held at the
    class level so all agents share the same settings.
    """

    _config = DEFAULT_CONFIG.copy()

    @classmethod
    def update_config(cls, config):
        """Update the class-level configuration."""
        cls._config.update(config)

    @property
    def config(self):
        """Access the configuration."""
        return self._config

    def __init__(self, config=None):
        # Instance construction may override the shared class-level config.
        if config:
            self.update_config(config)

    @staticmethod
    @tool
    def get_reddit_news(
        curr_date: Annotated[str, "Date you want to get news for in yyyy-mm-dd format"],
    ) -> str:
        """
        Retrieve global news from Reddit within a specified time frame.
        Args:
            curr_date (str): Date you want to get news for in yyyy-mm-dd format
        Returns:
            str: A formatted dataframe containing the latest global news from Reddit in the specified time frame.
        """
        # 7-day look-back window, top 5 posts per day.
        global_news_result = interface.get_reddit_global_news(curr_date, 7, 5)
        return global_news_result

    @staticmethod
    @tool
    def get_finnhub_news(
        ticker: Annotated[
            str,
            # Fixed unbalanced quote in the original description string.
            "Search query of a company, e.g. 'AAPL', 'TSM', etc.",
        ],
        start_date: Annotated[str, "Start date in yyyy-mm-dd format"],
        end_date: Annotated[str, "End date in yyyy-mm-dd format"],
    ):
        """
        Retrieve the latest news about a given stock from Finnhub within a date range
        Args:
            ticker (str): Ticker of a company. e.g. AAPL, TSM
            start_date (str): Start date in yyyy-mm-dd format
            end_date (str): End date in yyyy-mm-dd format
        Returns:
            str: A formatted dataframe containing news about the company within the date range from start_date to end_date
        """
        end_date_str = end_date

        # The interface takes an end date plus a look-back window, so
        # convert the [start_date, end_date] range into a day count.
        end_date = datetime.strptime(end_date, "%Y-%m-%d")
        start_date = datetime.strptime(start_date, "%Y-%m-%d")
        look_back_days = (end_date - start_date).days

        finnhub_news_result = interface.get_finnhub_news(
            ticker, end_date_str, look_back_days
        )
        return finnhub_news_result

    @staticmethod
    @tool
    def get_reddit_stock_info(
        ticker: Annotated[
            str,
            "Ticker of a company. e.g. AAPL, TSM",
        ],
        curr_date: Annotated[str, "Current date you want to get news for"],
    ) -> str:
        """
        Retrieve the latest news about a given stock from Reddit, given the current date.
        Args:
            ticker (str): Ticker of a company. e.g. AAPL, TSM
            curr_date (str): current date in yyyy-mm-dd format to get news for
        Returns:
            str: A formatted dataframe containing the latest news about the company on the given date
        """
        stock_news_results = interface.get_reddit_company_news(ticker, curr_date, 7, 5)
        return stock_news_results

    @staticmethod
    @tool
    def get_YFin_data(
        symbol: Annotated[str, "ticker symbol of the company"],
        start_date: Annotated[str, "Start date in yyyy-mm-dd format"],
        # Fixed: the original annotation wrongly described end_date as a start date.
        end_date: Annotated[str, "End date in yyyy-mm-dd format"],
    ) -> str:
        """
        Retrieve the stock price data for a given ticker symbol from Yahoo Finance.
        Args:
            symbol (str): Ticker symbol of the company, e.g. AAPL, TSM
            start_date (str): Start date in yyyy-mm-dd format
            end_date (str): End date in yyyy-mm-dd format
        Returns:
            str: A formatted dataframe containing the stock price data for the specified ticker symbol in the specified date range.
        """
        result_data = interface.get_YFin_data(symbol, start_date, end_date)
        return result_data

    @staticmethod
    @tool
    def get_YFin_data_online(
        symbol: Annotated[str, "ticker symbol of the company"],
        start_date: Annotated[str, "Start date in yyyy-mm-dd format"],
        # Fixed: the original annotation wrongly described end_date as a start date.
        end_date: Annotated[str, "End date in yyyy-mm-dd format"],
    ) -> str:
        """
        Retrieve the stock price data for a given ticker symbol from Yahoo Finance.
        Args:
            symbol (str): Ticker symbol of the company, e.g. AAPL, TSM
            start_date (str): Start date in yyyy-mm-dd format
            end_date (str): End date in yyyy-mm-dd format
        Returns:
            str: A formatted dataframe containing the stock price data for the specified ticker symbol in the specified date range.
        """
        result_data = interface.get_YFin_data_online(symbol, start_date, end_date)
        return result_data

    @staticmethod
    @tool
    def get_stockstats_indicators_report(
        symbol: Annotated[str, "ticker symbol of the company"],
        indicator: Annotated[
            str, "technical indicator to get the analysis and report of"
        ],
        curr_date: Annotated[
            str, "The current trading date you are trading on, YYYY-mm-dd"
        ],
        look_back_days: Annotated[int, "how many days to look back"] = 30,
    ) -> str:
        """
        Retrieve stock stats indicators for a given ticker symbol and indicator.
        Args:
            symbol (str): Ticker symbol of the company, e.g. AAPL, TSM
            indicator (str): Technical indicator to get the analysis and report of
            curr_date (str): The current trading date you are trading on, YYYY-mm-dd
            look_back_days (int): How many days to look back, default is 30
        Returns:
            str: A formatted dataframe containing the stock stats indicators for the specified ticker symbol and indicator.
        """
        # Final False flag selects the offline/cached data path.
        result_stockstats = interface.get_stock_stats_indicators_window(
            symbol, indicator, curr_date, look_back_days, False
        )
        return result_stockstats

    @staticmethod
    @tool
    def get_stockstats_indicators_report_online(
        symbol: Annotated[str, "ticker symbol of the company"],
        indicator: Annotated[
            str, "technical indicator to get the analysis and report of"
        ],
        curr_date: Annotated[
            str, "The current trading date you are trading on, YYYY-mm-dd"
        ],
        look_back_days: Annotated[int, "how many days to look back"] = 30,
    ) -> str:
        """
        Retrieve stock stats indicators for a given ticker symbol and indicator.
        Args:
            symbol (str): Ticker symbol of the company, e.g. AAPL, TSM
            indicator (str): Technical indicator to get the analysis and report of
            curr_date (str): The current trading date you are trading on, YYYY-mm-dd
            look_back_days (int): How many days to look back, default is 30
        Returns:
            str: A formatted dataframe containing the stock stats indicators for the specified ticker symbol and indicator.
        """
        # Final True flag selects the live/online data path.
        result_stockstats = interface.get_stock_stats_indicators_window(
            symbol, indicator, curr_date, look_back_days, True
        )
        return result_stockstats

    @staticmethod
    @tool
    def get_finnhub_company_insider_sentiment(
        ticker: Annotated[str, "ticker symbol for the company"],
        curr_date: Annotated[
            str,
            # Fixed grammar in the original description ("date of you are trading at").
            "current date you are trading at, yyyy-mm-dd",
        ],
    ):
        """
        Retrieve insider sentiment information about a company (retrieved from public SEC information) for the past 30 days
        Args:
            ticker (str): ticker symbol of the company
            curr_date (str): current date you are trading at, yyyy-mm-dd
        Returns:
            str: a report of the sentiment in the past 30 days starting at curr_date
        """
        data_sentiment = interface.get_finnhub_company_insider_sentiment(
            ticker, curr_date, 30
        )
        return data_sentiment

    @staticmethod
    @tool
    def get_finnhub_company_insider_transactions(
        ticker: Annotated[str, "ticker symbol"],
        curr_date: Annotated[
            str,
            "current date you are trading at, yyyy-mm-dd",
        ],
    ):
        """
        Retrieve insider transaction information about a company (retrieved from public SEC information) for the past 30 days
        Args:
            ticker (str): ticker symbol of the company
            curr_date (str): current date you are trading at, yyyy-mm-dd
        Returns:
            str: a report of the company's insider transactions/trading information in the past 30 days
        """
        data_trans = interface.get_finnhub_company_insider_transactions(
            ticker, curr_date, 30
        )
        return data_trans

    @staticmethod
    @tool
    def get_simfin_balance_sheet(
        ticker: Annotated[str, "ticker symbol"],
        freq: Annotated[
            str,
            "reporting frequency of the company's financial history: annual/quarterly",
        ],
        curr_date: Annotated[str, "current date you are trading at, yyyy-mm-dd"],
    ):
        """
        Retrieve the most recent balance sheet of a company
        Args:
            ticker (str): ticker symbol of the company
            freq (str): reporting frequency of the company's financial history: annual / quarterly
            curr_date (str): current date you are trading at, yyyy-mm-dd
        Returns:
            str: a report of the company's most recent balance sheet
        """
        data_balance_sheet = interface.get_simfin_balance_sheet(ticker, freq, curr_date)
        return data_balance_sheet

    @staticmethod
    @tool
    def get_simfin_cashflow(
        ticker: Annotated[str, "ticker symbol"],
        freq: Annotated[
            str,
            "reporting frequency of the company's financial history: annual/quarterly",
        ],
        curr_date: Annotated[str, "current date you are trading at, yyyy-mm-dd"],
    ):
        """
        Retrieve the most recent cash flow statement of a company
        Args:
            ticker (str): ticker symbol of the company
            freq (str): reporting frequency of the company's financial history: annual / quarterly
            curr_date (str): current date you are trading at, yyyy-mm-dd
        Returns:
            str: a report of the company's most recent cash flow statement
        """
        data_cashflow = interface.get_simfin_cashflow(ticker, freq, curr_date)
        return data_cashflow

    @staticmethod
    @tool
    def get_simfin_income_stmt(
        ticker: Annotated[str, "ticker symbol"],
        freq: Annotated[
            str,
            "reporting frequency of the company's financial history: annual/quarterly",
        ],
        curr_date: Annotated[str, "current date you are trading at, yyyy-mm-dd"],
    ):
        """
        Retrieve the most recent income statement of a company
        Args:
            ticker (str): ticker symbol of the company
            freq (str): reporting frequency of the company's financial history: annual / quarterly
            curr_date (str): current date you are trading at, yyyy-mm-dd
        Returns:
            str: a report of the company's most recent income statement
        """
        data_income_stmt = interface.get_simfin_income_statements(
            ticker, freq, curr_date
        )
        return data_income_stmt

    @staticmethod
    @tool
    def get_google_news(
        query: Annotated[str, "Query to search with"],
        curr_date: Annotated[str, "Curr date in yyyy-mm-dd format"],
    ):
        """
        Retrieve the latest news from Google News based on a query and date range.
        Args:
            query (str): Query to search with
            curr_date (str): Current date in yyyy-mm-dd format
        Returns:
            str: A formatted string containing the latest news from Google News based on the query and date range.
        """
        # Look-back window is fixed at 7 days (the original docstring
        # documented a look_back_days parameter that does not exist).
        google_news_results = interface.get_google_news(query, curr_date, 7)
        return google_news_results

    @staticmethod
    @tool
    def get_stock_news_openai(
        ticker: Annotated[str, "the company's ticker"],
        curr_date: Annotated[str, "Current date in yyyy-mm-dd format"],
    ):
        """
        Retrieve the latest news about a given stock by using OpenAI's news API.
        Args:
            ticker (str): Ticker of a company. e.g. AAPL, TSM
            curr_date (str): Current date in yyyy-mm-dd format
        Returns:
            str: A formatted string containing the latest news about the company on the given date.
        """
        openai_news_results = interface.get_stock_news_openai(ticker, curr_date)
        return openai_news_results

    @staticmethod
    @tool
    def get_global_news_openai(
        curr_date: Annotated[str, "Current date in yyyy-mm-dd format"],
    ):
        """
        Retrieve the latest macroeconomics news on a given date using OpenAI's macroeconomics news API.
        Args:
            curr_date (str): Current date in yyyy-mm-dd format
        Returns:
            str: A formatted string containing the latest macroeconomic news on the given date.
        """
        openai_news_results = interface.get_global_news_openai(curr_date)
        return openai_news_results

    @staticmethod
    @tool
    def get_fundamentals_openai(
        ticker: Annotated[str, "the company's ticker"],
        curr_date: Annotated[str, "Current date in yyyy-mm-dd format"],
    ):
        """
        Retrieve the latest fundamental information about a given stock on a given date by using OpenAI's news API.
        Args:
            ticker (str): Ticker of a company. e.g. AAPL, TSM
            curr_date (str): Current date in yyyy-mm-dd format
        Returns:
            str: A formatted string containing the latest fundamental information about the company on the given date.
        """
        openai_fundamentals_results = interface.get_fundamentals_openai(
            ticker, curr_date
        )
        return openai_fundamentals_results

View File

@ -1,109 +1,110 @@
import chromadb import chromadb
from chromadb.config import Settings from chromadb.config import Settings
from openai import OpenAI from openai import OpenAI
import numpy as np import numpy as np
from langchain_openai import OpenAIEmbeddings
import os
class FinancialSituationMemory:
    """ChromaDB-backed memory of (situation, recommendation) pairs.

    Situations are embedded with OpenAI embeddings (via langchain_openai) and
    retrieved by cosine-style similarity through a Chroma collection.
    """

    def __init__(self, name, model="text-embedding-ada-002"):
        """Create the memory.

        Args:
            name (str): Name of the Chroma collection to create.
            model (str): OpenAI embedding model to use. Generalized from the
                previously hard-coded "text-embedding-ada-002" (still the default).
        """
        self.embeddings = OpenAIEmbeddings(
            model=model, api_key=os.getenv("OPENAI_API_KEY")
        )
        # In-process, resettable Chroma client; data is not persisted to disk.
        self.chroma_client = chromadb.Client(Settings(allow_reset=True))
        self.situation_collection = self.chroma_client.create_collection(name=name)

    def get_embedding(self, text):
        """Return the embedding vector (list of floats) for *text*."""
        return self.embeddings.embed_query(text)

    def add_situations(self, situations_and_advice):
        """Add financial situations and their corresponding advice.

        Args:
            situations_and_advice: list of (situation, recommendation) tuples.
        """
        # Chroma's collection.add raises on empty id lists; nothing to do anyway.
        if not situations_and_advice:
            return

        situations = []
        advice = []
        ids = []
        embeddings = []

        # Continue id numbering after whatever is already stored.
        offset = self.situation_collection.count()

        for i, (situation, recommendation) in enumerate(situations_and_advice):
            situations.append(situation)
            advice.append(recommendation)
            ids.append(str(offset + i))
            embeddings.append(self.get_embedding(situation))

        self.situation_collection.add(
            documents=situations,
            metadatas=[{"recommendation": rec} for rec in advice],
            embeddings=embeddings,
            ids=ids,
        )

    def get_memories(self, current_situation, n_matches=1):
        """Return the *n_matches* stored situations most similar to *current_situation*.

        Returns:
            list[dict]: each with matched_situation, recommendation and
            similarity_score (1 - Chroma distance).
        """
        query_embedding = self.get_embedding(current_situation)

        results = self.situation_collection.query(
            query_embeddings=[query_embedding],
            n_results=n_matches,
            include=["metadatas", "documents", "distances"],
        )

        matched_results = []
        for i in range(len(results["documents"][0])):
            matched_results.append(
                {
                    "matched_situation": results["documents"][0][i],
                    "recommendation": results["metadatas"][0][i]["recommendation"],
                    # Chroma returns a distance; convert to a similarity score.
                    "similarity_score": 1 - results["distances"][0][i],
                }
            )

        return matched_results
if __name__ == "__main__":
    # Example usage.
    # BUG FIX: FinancialSituationMemory requires a collection name; calling it
    # with no arguments raised TypeError.
    matcher = FinancialSituationMemory("example_financial_memory")

    # Example data
    example_data = [
        (
            "High inflation rate with rising interest rates and declining consumer spending",
            "Consider defensive sectors like consumer staples and utilities. Review fixed-income portfolio duration.",
        ),
        (
            "Tech sector showing high volatility with increasing institutional selling pressure",
            "Reduce exposure to high-growth tech stocks. Look for value opportunities in established tech companies with strong cash flows.",
        ),
        (
            "Strong dollar affecting emerging markets with increasing forex volatility",
            "Hedge currency exposure in international positions. Consider reducing allocation to emerging market debt.",
        ),
        (
            "Market showing signs of sector rotation with rising yields",
            "Rebalance portfolio to maintain target allocations. Consider increasing exposure to sectors benefiting from higher rates.",
        ),
    ]

    # Add the example situations and recommendations
    matcher.add_situations(example_data)

    # Example query
    current_situation = """
    Market showing increased volatility in tech sector, with institutional investors
    reducing positions and rising interest rates affecting growth stock valuations
    """

    try:
        recommendations = matcher.get_memories(current_situation, n_matches=2)

        for i, rec in enumerate(recommendations, 1):
            print(f"\nMatch {i}:")
            print(f"Similarity Score: {rec['similarity_score']:.2f}")
            print(f"Matched Situation: {rec['matched_situation']}")
            print(f"Recommendation: {rec['recommendation']}")

    except Exception as e:
        print(f"Error during recommendation: {str(e)}")

View File

@ -1,46 +1,46 @@
from .finnhub_utils import get_data_in_range from .finnhub_utils import get_data_in_range
from .googlenews_utils import getNewsData from .googlenews_utils import getNewsData
from .yfin_utils import YFinanceUtils from .yfin_utils import YFinanceUtils
from .reddit_utils import fetch_top_from_category from .reddit_utils import fetch_top_from_category
from .stockstats_utils import StockstatsUtils from .stockstats_utils import StockstatsUtils
from .yfin_utils import YFinanceUtils from .yfin_utils import YFinanceUtils
from .interface import ( from .interface import (
# News and sentiment functions # News and sentiment functions
get_finnhub_news, get_finnhub_news,
get_finnhub_company_insider_sentiment, get_finnhub_company_insider_sentiment,
get_finnhub_company_insider_transactions, get_finnhub_company_insider_transactions,
get_google_news, get_google_news,
get_reddit_global_news, get_reddit_global_news,
get_reddit_company_news, get_reddit_company_news,
# Financial statements functions # Financial statements functions
get_simfin_balance_sheet, get_simfin_balance_sheet,
get_simfin_cashflow, get_simfin_cashflow,
get_simfin_income_statements, get_simfin_income_statements,
# Technical analysis functions # Technical analysis functions
get_stock_stats_indicators_window, get_stock_stats_indicators_window,
get_stockstats_indicator, get_stockstats_indicator,
# Market data functions # Market data functions
get_YFin_data_window, get_YFin_data_window,
get_YFin_data, get_YFin_data,
) )
# Public API of the dataflows package, grouped by functional area.
_NEWS_AND_SENTIMENT = [
    "get_finnhub_news",
    "get_finnhub_company_insider_sentiment",
    "get_finnhub_company_insider_transactions",
    "get_google_news",
    "get_reddit_global_news",
    "get_reddit_company_news",
]
_FINANCIAL_STATEMENTS = [
    "get_simfin_balance_sheet",
    "get_simfin_cashflow",
    "get_simfin_income_statements",
]
_TECHNICAL_ANALYSIS = [
    "get_stock_stats_indicators_window",
    "get_stockstats_indicator",
]
_MARKET_DATA = [
    "get_YFin_data_window",
    "get_YFin_data",
]

__all__ = (
    _NEWS_AND_SENTIMENT + _FINANCIAL_STATEMENTS + _TECHNICAL_ANALYSIS + _MARKET_DATA
)

View File

@ -1,34 +1,34 @@
import tradingagents.default_config as default_config
from typing import Dict, Optional

# Module-level configuration state: defaults from tradingagents.default_config,
# optionally overridden via set_config().
_config: Optional[Dict] = None
DATA_DIR: Optional[str] = None


def initialize_config():
    """Initialize the configuration with default values (idempotent)."""
    global _config, DATA_DIR
    if _config is not None:
        return
    _config = default_config.DEFAULT_CONFIG.copy()
    DATA_DIR = _config["data_dir"]


def set_config(config: Dict):
    """Overlay *config* on top of the current configuration and refresh DATA_DIR."""
    global _config, DATA_DIR
    if _config is None:
        _config = default_config.DEFAULT_CONFIG.copy()
    _config.update(config)
    DATA_DIR = _config["data_dir"]


def get_config() -> Dict:
    """Return a copy of the current configuration, initializing it if needed."""
    if _config is None:
        initialize_config()
    return _config.copy()


# Eagerly initialize so importers can read DATA_DIR immediately.
initialize_config()

View File

@ -1,36 +1,36 @@
import json import json
import os import os
def get_data_in_range(ticker, start_date, end_date, data_type, data_dir, period=None):
    """
    Gets finnhub data saved and processed on disk, filtered to a date range.

    Args:
        ticker (str): Ticker symbol of the company.
        start_date (str): Start date in YYYY-MM-DD format (inclusive).
        end_date (str): End date in YYYY-MM-DD format (inclusive).
        data_type (str): Type of data from finnhub to fetch. Can be insider_trans,
            SEC_filings, news_data, insider_senti, or fin_as_reported.
        data_dir (str): Directory where the data is saved.
        period (str): Default to none, if there is a period specified, should be
            annual or quarterly.

    Returns:
        dict: Entries whose date key falls within the range and whose value is
        non-empty.
    """
    if period:
        data_path = os.path.join(
            data_dir,
            "finnhub_data",
            data_type,
            f"{ticker}_{period}_data_formatted.json",
        )
    else:
        data_path = os.path.join(
            data_dir, "finnhub_data", data_type, f"{ticker}_data_formatted.json"
        )

    # BUG FIX: the file handle was previously opened and never closed
    # (resource leak); use a context manager instead.
    with open(data_path, "r") as f:
        data = json.load(f)

    # Keys are YYYY-MM-DD strings, so lexicographic comparison is equivalent
    # to chronological comparison.
    filtered_data = {}
    for key, value in data.items():
        if start_date <= key <= end_date and len(value) > 0:
            filtered_data[key] = value
    return filtered_data

View File

@ -1,108 +1,108 @@
import json import json
import requests import requests
from bs4 import BeautifulSoup from bs4 import BeautifulSoup
from datetime import datetime from datetime import datetime
import time import time
import random import random
from tenacity import ( from tenacity import (
retry, retry,
stop_after_attempt, stop_after_attempt,
wait_exponential, wait_exponential,
retry_if_exception_type, retry_if_exception_type,
retry_if_result, retry_if_result,
) )
def is_rate_limited(response):
    """Return True when the HTTP response signals rate limiting (status 429)."""
    status = response.status_code
    return status == 429
@retry(
    retry=(retry_if_result(is_rate_limited)),
    wait=wait_exponential(multiplier=1, min=4, max=60),
    stop=stop_after_attempt(5),
)
def make_request(url, headers):
    """GET *url*, retrying with exponential backoff while the server rate-limits us."""
    # Sleep a random 2-6 s before each request to look less like a bot.
    delay = random.uniform(2, 6)
    time.sleep(delay)
    return requests.get(url, headers=headers)
def getNewsData(query, start_date, end_date):
    """
    Scrape Google News search results for a given query and date range.
    query: str - search query
    start_date: str - start date in the format yyyy-mm-dd or mm/dd/yyyy
    end_date: str - end date in the format yyyy-mm-dd or mm/dd/yyyy
    """
    # Google's cdr filter expects mm/dd/yyyy; convert ISO-formatted dates.
    if "-" in start_date:
        start_date = datetime.strptime(start_date, "%Y-%m-%d").strftime("%m/%d/%Y")
    if "-" in end_date:
        end_date = datetime.strptime(end_date, "%Y-%m-%d").strftime("%m/%d/%Y")

    headers = {
        "User-Agent": (
            "Mozilla/5.0 (Windows NT 10.0; Win64; x64) "
            "AppleWebKit/537.36 (KHTML, like Gecko) "
            "Chrome/101.0.4951.54 Safari/537.36"
        )
    }

    news_results = []
    page = 0
    while True:
        offset = page * 10
        url = (
            f"https://www.google.com/search?q={query}"
            f"&tbs=cdr:1,cd_min:{start_date},cd_max:{end_date}"
            f"&tbm=nws&start={offset}"
        )

        try:
            response = make_request(url, headers)
            soup = BeautifulSoup(response.content, "html.parser")
            results_on_page = soup.select("div.SoaBEf")

            if not results_on_page:
                break  # No more results found

            for el in results_on_page:
                try:
                    news_results.append(
                        {
                            "link": el.find("a")["href"],
                            "title": el.select_one("div.MBeuO").get_text(),
                            "snippet": el.select_one(".GI74Re").get_text(),
                            "date": el.select_one(".LfVVr").get_text(),
                            "source": el.select_one(".NUnG9d span").get_text(),
                        }
                    )
                except Exception as e:
                    print(f"Error processing result: {e}")
                    # If one of the fields is not found, skip this result
                    continue

            # Stop when there is no "Next" pagination link.
            next_link = soup.find("a", id="pnnext")
            if not next_link:
                break
            page += 1

        except Exception as e:
            print(f"Failed after multiple retries: {e}")
            break

    return news_results

File diff suppressed because it is too large Load Diff

View File

@ -1,135 +1,135 @@
import requests import requests
import time import time
import json import json
from datetime import datetime, timedelta from datetime import datetime, timedelta
from contextlib import contextmanager from contextlib import contextmanager
from typing import Annotated from typing import Annotated
import os import os
import re import re
# Maps a stock ticker to its company name as used in reddit-post text search.
# Some values contain "OR"-separated aliases; fetch_top_from_category splits
# on " OR " and searches each alias independently.
ticker_to_company = {
    "AAPL": "Apple",
    "MSFT": "Microsoft",
    "GOOGL": "Google",
    "AMZN": "Amazon",
    "TSLA": "Tesla",
    "NVDA": "Nvidia",
    "TSM": "Taiwan Semiconductor Manufacturing Company OR TSMC",
    "JPM": "JPMorgan Chase OR JP Morgan",
    "JNJ": "Johnson & Johnson OR JNJ",
    "V": "Visa",
    "WMT": "Walmart",
    "META": "Meta OR Facebook",
    "AMD": "AMD",
    "INTC": "Intel",
    "QCOM": "Qualcomm",
    "BABA": "Alibaba",
    "ADBE": "Adobe",
    "NFLX": "Netflix",
    "CRM": "Salesforce",
    "PYPL": "PayPal",
    "PLTR": "Palantir",
    "MU": "Micron",
    "SQ": "Block OR Square",
    "ZM": "Zoom",
    "CSCO": "Cisco",
    "SHOP": "Shopify",
    "ORCL": "Oracle",
    "X": "Twitter OR X",
    "SPOT": "Spotify",
    "AVGO": "Broadcom",
    "ASML": "ASML ",
    "TWLO": "Twilio",
    "SNAP": "Snap Inc.",
    "TEAM": "Atlassian",
    "SQSP": "Squarespace",
    "UBER": "Uber",
    "ROKU": "Roku",
    "PINS": "Pinterest",
}
def fetch_top_from_category(
    category: Annotated[
        str, "Category to fetch top post from. Collection of subreddits."
    ],
    date: Annotated[str, "Date to fetch top posts from."],
    max_limit: Annotated[int, "Maximum number of posts to fetch."],
    query: Annotated[str, "Optional query to search for in the subreddit."] = None,
    data_path: Annotated[
        str,
        "Path to the data folder. Default is 'reddit_data'.",
    ] = "reddit_data",
):
    """Fetch the most-upvoted posts for one day from every subreddit dump in *category*.

    Each subreddit's .jsonl dump under ``data_path/category`` is scanned; posts
    from *date* are kept (and, for company categories, filtered by company
    name/ticker), then the max_limit budget is split evenly across dumps.

    Returns:
        list[dict]: posts with title, content, url, upvotes and posted_date.
    """
    base_path = data_path
    category_dir = os.path.join(base_path, category)

    # BUG FIX: the budget math previously used len(os.listdir(...)) — every
    # directory entry — while the loop below only reads .jsonl files, so any
    # stray file skewed limit_per_subreddit. Filter once and reuse.
    data_files = [f for f in os.listdir(category_dir) if f.endswith(".jsonl")]

    all_content = []

    if max_limit < len(data_files):
        raise ValueError(
            "REDDIT FETCHING ERROR: max limit is less than the number of files in the category. Will not be able to fetch any posts"
        )

    limit_per_subreddit = max_limit // len(data_files)

    for data_file in data_files:
        all_content_curr_subreddit = []

        with open(os.path.join(category_dir, data_file), "rb") as f:
            for line in f:
                # skip empty lines
                if not line.strip():
                    continue

                parsed_line = json.loads(line)

                # select only lines that are from the date
                post_date = datetime.utcfromtimestamp(
                    parsed_line["created_utc"]
                ).strftime("%Y-%m-%d")
                if post_date != date:
                    continue

                # if is company_news, check that the title or the content has
                # the company's name (query) mentioned
                if "company" in category and query:
                    if "OR" in ticker_to_company[query]:
                        search_terms = ticker_to_company[query].split(" OR ")
                    else:
                        search_terms = [ticker_to_company[query]]
                    search_terms.append(query)

                    found = False
                    for term in search_terms:
                        if re.search(
                            term, parsed_line["title"], re.IGNORECASE
                        ) or re.search(term, parsed_line["selftext"], re.IGNORECASE):
                            found = True
                            break
                    if not found:
                        continue

                all_content_curr_subreddit.append(
                    {
                        "title": parsed_line["title"],
                        "content": parsed_line["selftext"],
                        "url": parsed_line["url"],
                        "upvotes": parsed_line["ups"],
                        "posted_date": post_date,
                    }
                )

        # keep only this subreddit's most-upvoted posts, within its budget
        all_content_curr_subreddit.sort(key=lambda x: x["upvotes"], reverse=True)
        all_content.extend(all_content_curr_subreddit[:limit_per_subreddit])

    return all_content

View File

@ -1,87 +1,87 @@
import pandas as pd import pandas as pd
import yfinance as yf import yfinance as yf
from stockstats import wrap from stockstats import wrap
from typing import Annotated from typing import Annotated
import os import os
from .config import get_config from .config import get_config
class StockstatsUtils:
    """Compute stockstats technical indicators from local or Yahoo Finance data."""

    @staticmethod
    def get_stock_stats(
        symbol: Annotated[str, "ticker symbol for the company"],
        indicator: Annotated[
            str, "quantitative indicators based off of the stock data for the company"
        ],
        curr_date: Annotated[
            str, "curr date for retrieving stock price data, YYYY-mm-dd"
        ],
        data_dir: Annotated[
            str,
            "directory where the stock data is stored.",
        ],
        online: Annotated[
            bool,
            "whether to use online tools to fetch data or offline tools. If True, will use online tools.",
        ] = False,
    ):
        """Return the value of *indicator* for *symbol* on *curr_date*.

        Offline mode reads a pre-fetched fixed-range CSV from *data_dir*;
        online mode downloads up to 15 years of Yahoo Finance data and caches
        it as a CSV keyed by today's date. Returns the indicator value, or the
        string "N/A: ..." when curr_date is not a trading day.
        """
        df = None
        data = None

        if not online:
            try:
                # Offline path: expects a pre-downloaded CSV with a hard-coded
                # date range in its filename.
                data = pd.read_csv(
                    os.path.join(
                        data_dir,
                        f"{symbol}-YFin-data-2015-01-01-2025-03-25.csv",
                    )
                )
                # wrap() turns the DataFrame into a stockstats frame that
                # computes indicators lazily on column access.
                df = wrap(data)
            except FileNotFoundError:
                raise Exception("Stockstats fail: Yahoo Finance data not fetched yet!")
        else:
            # Get today's date as YYYY-mm-dd to add to cache
            today_date = pd.Timestamp.today()
            curr_date = pd.to_datetime(curr_date)

            # Download window: the last 15 years up to today.
            end_date = today_date
            start_date = today_date - pd.DateOffset(years=15)
            start_date = start_date.strftime("%Y-%m-%d")
            end_date = end_date.strftime("%Y-%m-%d")

            # Get config and ensure cache directory exists
            config = get_config()
            os.makedirs(config["data_cache_dir"], exist_ok=True)

            # Cache file name embeds the window, so it is reused within a day
            # and naturally refreshed on the next calendar day.
            data_file = os.path.join(
                config["data_cache_dir"],
                f"{symbol}-YFin-data-{start_date}-{end_date}.csv",
            )

            if os.path.exists(data_file):
                # Cache hit: reuse the earlier download.
                data = pd.read_csv(data_file)
                data["Date"] = pd.to_datetime(data["Date"])
            else:
                data = yf.download(
                    symbol,
                    start=start_date,
                    end=end_date,
                    multi_level_index=False,
                    progress=False,
                    auto_adjust=True,
                )
                data = data.reset_index()
                data.to_csv(data_file, index=False)

            df = wrap(data)
            # Normalize dates to strings so the startswith() match below works
            # for both the offline and online paths.
            df["Date"] = df["Date"].dt.strftime("%Y-%m-%d")
            curr_date = curr_date.strftime("%Y-%m-%d")

        df[indicator]  # trigger stockstats to calculate the indicator
        matching_rows = df[df["Date"].str.startswith(curr_date)]

        if not matching_rows.empty:
            indicator_value = matching_rows[indicator].values[0]
            return indicator_value
        else:
            return "N/A: Not a trading day (weekend or holiday)"

View File

@ -1,39 +1,39 @@
import os import os
import json import json
import pandas as pd import pandas as pd
from datetime import date, timedelta, datetime from datetime import date, timedelta, datetime
from typing import Annotated from typing import Annotated
SavePathType = Annotated[str, "File path to save data. If None, data is not saved."]


def save_output(data: pd.DataFrame, tag: str, save_path: SavePathType = None) -> None:
    """Write *data* to CSV at *save_path*; no-op when save_path is falsy."""
    if not save_path:
        return
    data.to_csv(save_path)
    print(f"{tag} saved to {save_path}")
def get_current_date():
    """Return today's date as a YYYY-MM-DD string."""
    today = date.today()
    return today.strftime("%Y-%m-%d")
def decorate_all_methods(decorator):
    """Return a class decorator that wraps every callable attribute with *decorator*."""

    def class_decorator(cls):
        # Snapshot the members first: setattr while iterating the live
        # __dict__ view is fragile.
        members = list(cls.__dict__.items())
        for attr_name, attr_value in members:
            if callable(attr_value):
                setattr(cls, attr_name, decorator(attr_value))
        return cls

    return class_decorator
def get_next_weekday(date):
    """Return *date* if it falls on a weekday, otherwise the following Monday.

    Accepts a datetime or a YYYY-MM-DD string; always returns a datetime.
    """
    if not isinstance(date, datetime):
        date = datetime.strptime(date, "%Y-%m-%d")

    if date.weekday() < 5:
        return date
    # Saturday (5) or Sunday (6): roll forward to the next Monday.
    return date + timedelta(days=7 - date.weekday())

View File

@ -1,117 +1,117 @@
# gets data/stats # gets data/stats
import yfinance as yf import yfinance as yf
from typing import Annotated, Callable, Any, Optional from typing import Annotated, Callable, Any, Optional
from pandas import DataFrame from pandas import DataFrame
import pandas as pd import pandas as pd
from functools import wraps from functools import wraps
from .utils import save_output, SavePathType, decorate_all_methods from .utils import save_output, SavePathType, decorate_all_methods
def init_ticker(func: Callable) -> Callable:
    """Decorator to initialize yf.Ticker and pass it to the function."""

    @wraps(func)
    def wrapper(symbol: Annotated[str, "ticker symbol"], *args, **kwargs) -> Any:
        # Replace the raw symbol string with an initialized Ticker object.
        return func(yf.Ticker(symbol), *args, **kwargs)

    return wrapper
@decorate_all_methods(init_ticker)
class YFinanceUtils:
    """Stateless helpers around the ``yfinance`` API.

    NOTE: every method here is wrapped by ``init_ticker`` (applied via
    ``decorate_all_methods``), so although each signature declares
    ``symbol: str``, the wrapper substitutes an initialized ``yf.Ticker``
    at call time — hence the ``ticker = symbol`` rebinding in every body.
    Callers invoke these as plain functions, e.g.
    ``YFinanceUtils.get_stock_data("AAPL", start, end)``.
    """

    def get_stock_data(
        symbol: Annotated[str, "ticker symbol"],
        start_date: Annotated[
            str, "start date for retrieving stock price data, YYYY-mm-dd"
        ],
        end_date: Annotated[
            str, "end date for retrieving stock price data, YYYY-mm-dd"
        ],
        save_path: SavePathType = None,
    ) -> DataFrame:
        """retrieve stock price data for designated ticker symbol"""
        ticker = symbol  # actually a yf.Ticker — see class docstring
        # add one day to the end_date so that the data range is inclusive
        end_date = pd.to_datetime(end_date) + pd.DateOffset(days=1)
        end_date = end_date.strftime("%Y-%m-%d")
        stock_data = ticker.history(start=start_date, end=end_date)
        # save_output(stock_data, f"Stock data for {ticker.ticker}", save_path)
        return stock_data

    def get_stock_info(
        symbol: Annotated[str, "ticker symbol"],
    ) -> dict:
        """Fetches and returns latest stock information."""
        ticker = symbol
        stock_info = ticker.info
        return stock_info

    def get_company_info(
        symbol: Annotated[str, "ticker symbol"],
        save_path: Optional[str] = None,
    ) -> DataFrame:
        """Fetches and returns company information as a DataFrame.

        Missing fields default to ``"N/A"``; optionally saves to CSV.
        """
        ticker = symbol
        info = ticker.info
        company_info = {
            "Company Name": info.get("shortName", "N/A"),
            "Industry": info.get("industry", "N/A"),
            "Sector": info.get("sector", "N/A"),
            "Country": info.get("country", "N/A"),
            "Website": info.get("website", "N/A"),
        }
        company_info_df = DataFrame([company_info])
        if save_path:
            company_info_df.to_csv(save_path)
            print(f"Company info for {ticker.ticker} saved to {save_path}")
        return company_info_df

    def get_stock_dividends(
        symbol: Annotated[str, "ticker symbol"],
        save_path: Optional[str] = None,
    ) -> DataFrame:
        """Fetches and returns the latest dividends data as a DataFrame."""
        ticker = symbol
        dividends = ticker.dividends
        if save_path:
            dividends.to_csv(save_path)
            print(f"Dividends for {ticker.ticker} saved to {save_path}")
        return dividends

    def get_income_stmt(symbol: Annotated[str, "ticker symbol"]) -> DataFrame:
        """Fetches and returns the latest income statement of the company as a DataFrame."""
        ticker = symbol
        income_stmt = ticker.financials
        return income_stmt

    def get_balance_sheet(symbol: Annotated[str, "ticker symbol"]) -> DataFrame:
        """Fetches and returns the latest balance sheet of the company as a DataFrame."""
        ticker = symbol
        balance_sheet = ticker.balance_sheet
        return balance_sheet

    def get_cash_flow(symbol: Annotated[str, "ticker symbol"]) -> DataFrame:
        """Fetches and returns the latest cash flow statement of the company as a DataFrame."""
        ticker = symbol
        cash_flow = ticker.cashflow
        return cash_flow

    def get_analyst_recommendations(symbol: Annotated[str, "ticker symbol"]) -> tuple:
        """Fetches the latest analyst recommendations and returns the most common
        recommendation and its count.

        Returns ``(None, 0)`` when no recommendation data is available.
        """
        ticker = symbol
        recommendations = ticker.recommendations
        # yfinance may return None (not just an empty DataFrame) when there is
        # no recommendation data; guard both cases before touching .empty.
        if recommendations is None or recommendations.empty:
            return None, 0  # No recommendations available
        # Assuming 'period' column exists and needs to be excluded
        row_0 = recommendations.iloc[0, 1:]  # Exclude 'period' column if necessary
        # Find the maximum voting result
        max_votes = row_0.max()
        majority_voting_result = row_0[row_0 == max_votes].index.tolist()
        return majority_voting_result[0], max_votes

View File

@ -1,19 +1,19 @@
import os import os
# Resolve paths relative to this file so the package works from any CWD.
_BASE_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), "."))

DEFAULT_CONFIG = {
    "project_dir": _BASE_DIR,
    # NOTE(review): machine-specific absolute path — confirm/override per deployment.
    "data_dir": "/Users/yluo/Documents/Code/ScAI/FR1-data",
    "data_cache_dir": os.path.join(_BASE_DIR, "dataflows/data_cache"),
    # LLM settings
    "deep_think_llm": "o4-mini",
    "quick_think_llm": "gpt-4o-mini",
    # Debate and discussion settings
    "max_debate_rounds": 1,
    "max_risk_discuss_rounds": 1,
    "max_recur_limit": 100,
    # Tool settings
    "online_tools": True,
}

View File

@ -1,17 +1,17 @@
# TradingAgents/graph/__init__.py # TradingAgents/graph/__init__.py
from .trading_graph import TradingAgentsGraph from .trading_graph import TradingAgentsGraph
from .conditional_logic import ConditionalLogic from .conditional_logic import ConditionalLogic
from .setup import GraphSetup from .setup import GraphSetup
from .propagation import Propagator from .propagation import Propagator
from .reflection import Reflector from .reflection import Reflector
from .signal_processing import SignalProcessor from .signal_processing import SignalProcessor
__all__ = [ __all__ = [
"TradingAgentsGraph", "TradingAgentsGraph",
"ConditionalLogic", "ConditionalLogic",
"GraphSetup", "GraphSetup",
"Propagator", "Propagator",
"Reflector", "Reflector",
"SignalProcessor", "SignalProcessor",
] ]

View File

@ -1,67 +1,67 @@
# TradingAgents/graph/conditional_logic.py # TradingAgents/graph/conditional_logic.py
from tradingagents.agents.utils.agent_states import AgentState from tradingagents.agents.utils.agent_states import AgentState
class ConditionalLogic:
    """Handles conditional logic for determining graph flow.

    The four ``should_continue_*`` analyst routers share one rule — keep
    calling tools while the last message requests them — so they delegate
    to a single private helper instead of repeating it.
    """

    def __init__(self, max_debate_rounds=1, max_risk_discuss_rounds=1):
        """Initialize with configuration parameters.

        max_debate_rounds: bull/bear rounds before the Research Manager judges.
        max_risk_discuss_rounds: risky/safe/neutral rounds before the Risk Judge.
        """
        self.max_debate_rounds = max_debate_rounds
        self.max_risk_discuss_rounds = max_risk_discuss_rounds

    def _route_analyst(self, state: "AgentState", tool_node: str, clear_node: str) -> str:
        """Route to *tool_node* while the last message carries tool calls,
        otherwise to *clear_node*."""
        last_message = state["messages"][-1]
        if last_message.tool_calls:
            return tool_node
        return clear_node

    def should_continue_market(self, state: "AgentState"):
        """Determine if market analysis should continue."""
        return self._route_analyst(state, "tools_market", "Msg Clear Market")

    def should_continue_social(self, state: "AgentState"):
        """Determine if social media analysis should continue."""
        return self._route_analyst(state, "tools_social", "Msg Clear Social")

    def should_continue_news(self, state: "AgentState"):
        """Determine if news analysis should continue."""
        return self._route_analyst(state, "tools_news", "Msg Clear News")

    def should_continue_fundamentals(self, state: "AgentState"):
        """Determine if fundamentals analysis should continue."""
        return self._route_analyst(state, "tools_fundamentals", "Msg Clear Fundamentals")

    def should_continue_debate(self, state: "AgentState") -> str:
        """Determine if the bull/bear debate should continue."""
        debate = state["investment_debate_state"]
        # One round = one bull turn + one bear turn, hence the factor of 2.
        if debate["count"] >= 2 * self.max_debate_rounds:
            return "Research Manager"
        # Alternate speakers: whoever did not answer last goes next.
        if debate["current_response"].startswith("Bull"):
            return "Bear Researcher"
        return "Bull Researcher"

    def should_continue_risk_analysis(self, state: "AgentState") -> str:
        """Determine if risk analysis should continue."""
        risk = state["risk_debate_state"]
        # One round = one turn each for the risky, safe and neutral analysts.
        if risk["count"] >= 3 * self.max_risk_discuss_rounds:
            return "Risk Judge"
        speaker = risk["latest_speaker"]
        if speaker.startswith("Risky"):
            return "Safe Analyst"
        if speaker.startswith("Safe"):
            return "Neutral Analyst"
        return "Risky Analyst"

View File

@ -1,49 +1,49 @@
# TradingAgents/graph/propagation.py # TradingAgents/graph/propagation.py
from typing import Dict, Any from typing import Dict, Any
from tradingagents.agents.utils.agent_states import ( from tradingagents.agents.utils.agent_states import (
AgentState, AgentState,
InvestDebateState, InvestDebateState,
RiskDebateState, RiskDebateState,
) )
class Propagator:
    """Handles state initialization and propagation through the graph."""

    def __init__(self, max_recur_limit=100):
        """Initialize with configuration parameters."""
        self.max_recur_limit = max_recur_limit

    def create_initial_state(
        self, company_name: str, trade_date: str
    ) -> Dict[str, Any]:
        """Create the initial state for the agent graph."""
        debate_state = InvestDebateState(
            {"history": "", "current_response": "", "count": 0}
        )
        risk_state = RiskDebateState(
            {
                "history": "",
                "current_risky_response": "",
                "current_safe_response": "",
                "current_neutral_response": "",
                "count": 0,
            }
        )
        initial: Dict[str, Any] = {
            "messages": [("human", company_name)],
            "company_of_interest": company_name,
            "trade_date": str(trade_date),
            "investment_debate_state": debate_state,
            "risk_debate_state": risk_state,
        }
        # All analyst reports start empty and are filled in by the graph nodes.
        for report_key in (
            "market_report",
            "fundamentals_report",
            "sentiment_report",
            "news_report",
        ):
            initial[report_key] = ""
        return initial

    def get_graph_args(self) -> Dict[str, Any]:
        """Get arguments for the graph invocation."""
        return {
            "stream_mode": "values",
            "config": {"recursion_limit": self.max_recur_limit},
        }

View File

@ -1,121 +1,121 @@
# TradingAgents/graph/reflection.py # TradingAgents/graph/reflection.py
from typing import Dict, Any from typing import Dict, Any
from langchain_openai import ChatOpenAI from langchain_openai import ChatOpenAI
class Reflector:
    """Handles reflection on decisions and updating memory.

    All five public ``reflect_*`` methods follow the same flow — extract the
    market situation, ask the LLM to reflect on one component's report, and
    store the (situation, reflection) pair in that component's memory — so
    they delegate to a single private helper.
    """

    def __init__(self, quick_thinking_llm: "ChatOpenAI"):
        """Initialize the reflector with an LLM."""
        self.quick_thinking_llm = quick_thinking_llm
        self.reflection_system_prompt = self._get_reflection_prompt()

    def _get_reflection_prompt(self) -> str:
        """Get the system prompt for reflection."""
        return """
You are an expert financial analyst tasked with reviewing trading decisions/analysis and providing a comprehensive, step-by-step analysis.
Your goal is to deliver detailed insights into investment decisions and highlight opportunities for improvement, adhering strictly to the following guidelines:

1. Reasoning:
   - For each trading decision, determine whether it was correct or incorrect. A correct decision results in an increase in returns, while an incorrect decision does the opposite.
   - Analyze the contributing factors to each success or mistake. Consider:
     - Market intelligence.
     - Technical indicators.
     - Technical signals.
     - Price movement analysis.
     - Overall market data analysis
     - News analysis.
     - Social media and sentiment analysis.
     - Fundamental data analysis.
   - Weight the importance of each factor in the decision-making process.

2. Improvement:
   - For any incorrect decisions, propose revisions to maximize returns.
   - Provide a detailed list of corrective actions or improvements, including specific recommendations (e.g., changing a decision from HOLD to BUY on a particular date).

3. Summary:
   - Summarize the lessons learned from the successes and mistakes.
   - Highlight how these lessons can be adapted for future trading scenarios and draw connections between similar situations to apply the knowledge gained.

4. Query:
   - Extract key insights from the summary into a concise sentence of no more than 1000 tokens.
   - Ensure the condensed sentence captures the essence of the lessons and reasoning for easy reference.

Adhere strictly to these instructions, and ensure your output is detailed, accurate, and actionable. You will also be given objective descriptions of the market from a price movements, technical indicator, news, and sentiment perspective to provide more context for your analysis.
"""

    def _extract_current_situation(self, current_state: Dict[str, Any]) -> str:
        """Extract the current market situation from the state.

        Concatenates the four analyst reports, separated by blank lines.
        """
        curr_market_report = current_state["market_report"]
        curr_sentiment_report = current_state["sentiment_report"]
        curr_news_report = current_state["news_report"]
        curr_fundamentals_report = current_state["fundamentals_report"]
        return f"{curr_market_report}\n\n{curr_sentiment_report}\n\n{curr_news_report}\n\n{curr_fundamentals_report}"

    def _reflect_on_component(
        self, component_type: str, report: str, situation: str, returns_losses
    ) -> str:
        """Generate reflection for a component.

        NOTE(review): ``component_type`` is accepted but not used in the
        prompt — presumably intended to label the reflection; confirm whether
        it should be injected into the message.
        """
        messages = [
            ("system", self.reflection_system_prompt),
            (
                "human",
                f"Returns: {returns_losses}\n\nAnalysis/Decision: {report}\n\nObjective Market Reports for Reference: {situation}",
            ),
        ]
        result = self.quick_thinking_llm.invoke(messages).content
        return result

    def _reflect_and_remember(
        self, component_type: str, report: str, current_state, returns_losses, memory
    ) -> None:
        """Shared flow: build the situation, reflect on *report*, store the pair in *memory*."""
        situation = self._extract_current_situation(current_state)
        result = self._reflect_on_component(
            component_type, report, situation, returns_losses
        )
        memory.add_situations([(situation, result)])

    def reflect_bull_researcher(self, current_state, returns_losses, bull_memory):
        """Reflect on bull researcher's analysis and update memory."""
        self._reflect_and_remember(
            "BULL",
            current_state["investment_debate_state"]["bull_history"],
            current_state,
            returns_losses,
            bull_memory,
        )

    def reflect_bear_researcher(self, current_state, returns_losses, bear_memory):
        """Reflect on bear researcher's analysis and update memory."""
        self._reflect_and_remember(
            "BEAR",
            current_state["investment_debate_state"]["bear_history"],
            current_state,
            returns_losses,
            bear_memory,
        )

    def reflect_trader(self, current_state, returns_losses, trader_memory):
        """Reflect on trader's decision and update memory."""
        self._reflect_and_remember(
            "TRADER",
            current_state["trader_investment_plan"],
            current_state,
            returns_losses,
            trader_memory,
        )

    def reflect_invest_judge(self, current_state, returns_losses, invest_judge_memory):
        """Reflect on investment judge's decision and update memory."""
        self._reflect_and_remember(
            "INVEST JUDGE",
            current_state["investment_debate_state"]["judge_decision"],
            current_state,
            returns_losses,
            invest_judge_memory,
        )

    def reflect_risk_manager(self, current_state, returns_losses, risk_manager_memory):
        """Reflect on risk manager's decision and update memory."""
        self._reflect_and_remember(
            "RISK JUDGE",
            current_state["risk_debate_state"]["judge_decision"],
            current_state,
            returns_losses,
            risk_manager_memory,
        )

View File

@ -1,205 +1,205 @@
# TradingAgents/graph/setup.py # TradingAgents/graph/setup.py
from typing import Dict, Any from typing import Dict, Any
from langchain_openai import ChatOpenAI from langchain_openai import ChatOpenAI
from langgraph.graph import END, StateGraph, START from langgraph.graph import END, StateGraph, START
from langgraph.prebuilt import ToolNode from langgraph.prebuilt import ToolNode
from tradingagents.agents import * from tradingagents.agents import *
from tradingagents.agents.utils.agent_states import AgentState from tradingagents.agents.utils.agent_states import AgentState
from tradingagents.agents.utils.agent_utils import Toolkit from tradingagents.agents.utils.agent_utils import Toolkit
from .conditional_logic import ConditionalLogic from .conditional_logic import ConditionalLogic
class GraphSetup:
    """Handles the setup and configuration of the agent graph."""

    def __init__(
        self,
        quick_thinking_llm: ChatOpenAI,
        deep_thinking_llm: ChatOpenAI,
        toolkit: Toolkit,
        tool_nodes: Dict[str, ToolNode],
        bull_memory,
        bear_memory,
        trader_memory,
        invest_judge_memory,
        risk_manager_memory,
        conditional_logic: ConditionalLogic,
    ):
        """Initialize with required components.

        Args:
            quick_thinking_llm: Fast LLM used by analyst/researcher/trader nodes.
            deep_thinking_llm: Deliberate LLM used by the manager and judge nodes.
            toolkit: Data-access toolkit handed to every analyst factory.
            tool_nodes: Pre-built ToolNode per analyst type
                ("market", "social", "news", "fundamentals").
            bull_memory: Situation memory for the bull researcher.
            bear_memory: Situation memory for the bear researcher.
            trader_memory: Situation memory for the trader.
            invest_judge_memory: Situation memory for the research manager.
            risk_manager_memory: Situation memory for the risk judge.
            conditional_logic: Predicates that drive the conditional edges.
        """
        self.quick_thinking_llm = quick_thinking_llm
        self.deep_thinking_llm = deep_thinking_llm
        self.toolkit = toolkit
        self.tool_nodes = tool_nodes
        self.bull_memory = bull_memory
        self.bear_memory = bear_memory
        self.trader_memory = trader_memory
        self.invest_judge_memory = invest_judge_memory
        self.risk_manager_memory = risk_manager_memory
        self.conditional_logic = conditional_logic

    def setup_graph(self, selected_analysts=None):
        """Set up and compile the agent workflow graph.

        Args:
            selected_analysts (list): Analyst types to include, in the order
                they should run. Options are:
                - "market": Market analyst
                - "social": Social media analyst
                - "news": News analyst
                - "fundamentals": Fundamentals analyst
                Defaults to all four.

        Returns:
            The compiled LangGraph workflow.

        Raises:
            ValueError: If no analysts are selected or an unknown analyst
                type is requested.
        """
        # None as the default instead of a mutable list literal shared
        # between calls (classic mutable-default-argument pitfall).
        if selected_analysts is None:
            selected_analysts = ["market", "social", "news", "fundamentals"]
        if len(selected_analysts) == 0:
            raise ValueError("Trading Agents Graph Setup Error: no analysts selected!")

        # Every factory shares the (llm, toolkit) signature, so the four
        # near-identical if-blocks collapse into a single data-driven loop.
        analyst_factories = {
            "market": create_market_analyst,
            "social": create_social_media_analyst,
            "news": create_news_analyst,
            "fundamentals": create_fundamentals_analyst,
        }
        unknown = [a for a in selected_analysts if a not in analyst_factories]
        if unknown:
            # Fail fast with a clear message instead of a confusing
            # missing-node error at graph compile time.
            raise ValueError(
                f"Trading Agents Graph Setup Error: unknown analyst type(s): {unknown}"
            )

        # Create each analyst node plus its message-clear and tool nodes.
        analyst_nodes = {}
        delete_nodes = {}
        tool_nodes = {}
        for analyst_type in selected_analysts:
            analyst_nodes[analyst_type] = analyst_factories[analyst_type](
                self.quick_thinking_llm, self.toolkit
            )
            delete_nodes[analyst_type] = create_msg_delete()
            tool_nodes[analyst_type] = self.tool_nodes[analyst_type]

        # Create researcher and manager nodes
        bull_researcher_node = create_bull_researcher(
            self.quick_thinking_llm, self.bull_memory
        )
        bear_researcher_node = create_bear_researcher(
            self.quick_thinking_llm, self.bear_memory
        )
        research_manager_node = create_research_manager(
            self.deep_thinking_llm, self.invest_judge_memory
        )
        trader_node = create_trader(self.quick_thinking_llm, self.trader_memory)

        # Create risk analysis nodes
        risky_analyst = create_risky_debator(self.quick_thinking_llm)
        neutral_analyst = create_neutral_debator(self.quick_thinking_llm)
        safe_analyst = create_safe_debator(self.quick_thinking_llm)
        risk_manager_node = create_risk_manager(
            self.deep_thinking_llm, self.risk_manager_memory
        )

        # Create workflow
        workflow = StateGraph(AgentState)

        # Add analyst nodes to the graph
        for analyst_type, node in analyst_nodes.items():
            workflow.add_node(f"{analyst_type.capitalize()} Analyst", node)
            workflow.add_node(
                f"Msg Clear {analyst_type.capitalize()}", delete_nodes[analyst_type]
            )
            workflow.add_node(f"tools_{analyst_type}", tool_nodes[analyst_type])

        # Add other nodes
        workflow.add_node("Bull Researcher", bull_researcher_node)
        workflow.add_node("Bear Researcher", bear_researcher_node)
        workflow.add_node("Research Manager", research_manager_node)
        workflow.add_node("Trader", trader_node)
        workflow.add_node("Risky Analyst", risky_analyst)
        workflow.add_node("Neutral Analyst", neutral_analyst)
        workflow.add_node("Safe Analyst", safe_analyst)
        workflow.add_node("Risk Judge", risk_manager_node)

        # Define edges: start with the first selected analyst.
        first_analyst = selected_analysts[0]
        workflow.add_edge(START, f"{first_analyst.capitalize()} Analyst")

        # Chain the analysts: each one loops with its tools until its
        # conditional says to stop, then clears messages and hands off.
        for i, analyst_type in enumerate(selected_analysts):
            current_analyst = f"{analyst_type.capitalize()} Analyst"
            current_tools = f"tools_{analyst_type}"
            current_clear = f"Msg Clear {analyst_type.capitalize()}"

            # Add conditional edges for current analyst
            workflow.add_conditional_edges(
                current_analyst,
                getattr(self.conditional_logic, f"should_continue_{analyst_type}"),
                [current_tools, current_clear],
            )
            workflow.add_edge(current_tools, current_analyst)

            # Connect to next analyst, or to Bull Researcher after the last one.
            if i < len(selected_analysts) - 1:
                next_analyst = f"{selected_analysts[i + 1].capitalize()} Analyst"
                workflow.add_edge(current_clear, next_analyst)
            else:
                workflow.add_edge(current_clear, "Bull Researcher")

        # Bull/bear debate loop, adjudicated by the research manager.
        workflow.add_conditional_edges(
            "Bull Researcher",
            self.conditional_logic.should_continue_debate,
            {
                "Bear Researcher": "Bear Researcher",
                "Research Manager": "Research Manager",
            },
        )
        workflow.add_conditional_edges(
            "Bear Researcher",
            self.conditional_logic.should_continue_debate,
            {
                "Bull Researcher": "Bull Researcher",
                "Research Manager": "Research Manager",
            },
        )
        workflow.add_edge("Research Manager", "Trader")
        workflow.add_edge("Trader", "Risky Analyst")

        # Risk debate cycle (risky -> safe -> neutral -> risky ...) until the
        # conditional routes to the Risk Judge.
        workflow.add_conditional_edges(
            "Risky Analyst",
            self.conditional_logic.should_continue_risk_analysis,
            {
                "Safe Analyst": "Safe Analyst",
                "Risk Judge": "Risk Judge",
            },
        )
        workflow.add_conditional_edges(
            "Safe Analyst",
            self.conditional_logic.should_continue_risk_analysis,
            {
                "Neutral Analyst": "Neutral Analyst",
                "Risk Judge": "Risk Judge",
            },
        )
        workflow.add_conditional_edges(
            "Neutral Analyst",
            self.conditional_logic.should_continue_risk_analysis,
            {
                "Risky Analyst": "Risky Analyst",
                "Risk Judge": "Risk Judge",
            },
        )
        workflow.add_edge("Risk Judge", END)

        # Compile and return
        return workflow.compile()

# (diff-viewer residue: separator before the next file in this commit)
# View File
# @@ -1,31 +1,31 @@
# TradingAgents/graph/signal_processing.py # TradingAgents/graph/signal_processing.py
from langchain_openai import ChatOpenAI from langchain_openai import ChatOpenAI
class SignalProcessor:
    """Processes trading signals to extract actionable decisions."""

    def __init__(self, quick_thinking_llm: ChatOpenAI):
        """Initialize with the LLM that performs the extraction call."""
        self.quick_thinking_llm = quick_thinking_llm

    def process_signal(self, full_signal: str) -> str:
        """Extract the core decision from a complete trading signal.

        Args:
            full_signal: Complete trading signal text.

        Returns:
            The decision string produced by the LLM
            (expected to be BUY, SELL, or HOLD).
        """
        system_prompt = "You are an efficient assistant designed to analyze paragraphs or financial reports provided by a group of analysts. Your task is to extract the investment decision: SELL, BUY, or HOLD. Provide only the extracted decision (SELL, BUY, or HOLD) as your output, without adding any additional text or information."
        prompt = [
            ("system", system_prompt),
            ("human", full_signal),
        ]
        response = self.quick_thinking_llm.invoke(prompt)
        return response.content

# (diff-viewer residue: separator before the next file in this commit)
# View File
# @@ -1,243 +1,243 @@
# TradingAgents/graph/trading_graph.py # TradingAgents/graph/trading_graph.py
import os import os
from pathlib import Path from pathlib import Path
import json import json
from datetime import date from datetime import date
from typing import Dict, Any, Tuple, List, Optional from typing import Dict, Any, Tuple, List, Optional
from langchain_openai import ChatOpenAI from langchain_openai import ChatOpenAI
from langgraph.prebuilt import ToolNode from langgraph.prebuilt import ToolNode
from tradingagents.agents import * from tradingagents.agents import *
from tradingagents.default_config import DEFAULT_CONFIG from tradingagents.default_config import DEFAULT_CONFIG
from tradingagents.agents.utils.memory import FinancialSituationMemory from tradingagents.agents.utils.memory import FinancialSituationMemory
from tradingagents.agents.utils.agent_states import ( from tradingagents.agents.utils.agent_states import (
AgentState, AgentState,
InvestDebateState, InvestDebateState,
RiskDebateState, RiskDebateState,
) )
from tradingagents.dataflows.interface import set_config from tradingagents.dataflows.interface import set_config
from .conditional_logic import ConditionalLogic from .conditional_logic import ConditionalLogic
from .setup import GraphSetup from .setup import GraphSetup
from .propagation import Propagator from .propagation import Propagator
from .reflection import Reflector from .reflection import Reflector
from .signal_processing import SignalProcessor from .signal_processing import SignalProcessor
class TradingAgentsGraph:
    """Main class that orchestrates the trading agents framework."""

    def __init__(
        self,
        selected_analysts=None,
        debug=False,
        config: Optional[Dict[str, Any]] = None,
    ):
        """Initialize the trading agents graph and components.

        Args:
            selected_analysts: List of analyst types to include
                ("market", "social", "news", "fundamentals").
                Defaults to all four; None avoids a mutable default argument.
            debug: Whether to run in debug mode (stream the graph and
                pretty-print each chunk's newest message).
            config: Configuration dictionary. If None, uses default config.
        """
        if selected_analysts is None:
            selected_analysts = ["market", "social", "news", "fundamentals"]
        self.debug = debug
        self.config = config or DEFAULT_CONFIG

        # Update the interface's config
        set_config(self.config)

        # Create necessary directories
        os.makedirs(
            os.path.join(self.config["project_dir"], "dataflows/data_cache"),
            exist_ok=True,
        )

        # Initialize LLMs
        self.deep_thinking_llm = ChatOpenAI(model=self.config["deep_think_llm"])
        self.quick_thinking_llm = ChatOpenAI(
            model=self.config["quick_think_llm"], temperature=0.1
        )
        self.toolkit = Toolkit(config=self.config)

        # Initialize memories
        self.bull_memory = FinancialSituationMemory("bull_memory")
        self.bear_memory = FinancialSituationMemory("bear_memory")
        self.trader_memory = FinancialSituationMemory("trader_memory")
        self.invest_judge_memory = FinancialSituationMemory("invest_judge_memory")
        self.risk_manager_memory = FinancialSituationMemory("risk_manager_memory")

        # Create tool nodes
        self.tool_nodes = self._create_tool_nodes()

        # Initialize components
        self.conditional_logic = ConditionalLogic()
        self.graph_setup = GraphSetup(
            self.quick_thinking_llm,
            self.deep_thinking_llm,
            self.toolkit,
            self.tool_nodes,
            self.bull_memory,
            self.bear_memory,
            self.trader_memory,
            self.invest_judge_memory,
            self.risk_manager_memory,
            self.conditional_logic,
        )
        self.propagator = Propagator()
        self.reflector = Reflector(self.quick_thinking_llm)
        self.signal_processor = SignalProcessor(self.quick_thinking_llm)

        # State tracking
        self.curr_state = None  # final graph state of the latest propagate()
        self.ticker = None  # company most recently propagated
        self.log_states_dict = {}  # date to full state dict

        # Set up the graph
        self.graph = self.graph_setup.setup_graph(selected_analysts)

    def _create_tool_nodes(self) -> Dict[str, ToolNode]:
        """Create tool nodes for different data sources.

        Returns:
            Mapping from analyst type to its ToolNode; each node lists the
            online tools first, then the offline fallbacks.
        """
        return {
            "market": ToolNode(
                [
                    # online tools
                    self.toolkit.get_YFin_data_online,
                    self.toolkit.get_stockstats_indicators_report_online,
                    # offline tools
                    self.toolkit.get_YFin_data,
                    self.toolkit.get_stockstats_indicators_report,
                ]
            ),
            "social": ToolNode(
                [
                    # online tools
                    self.toolkit.get_stock_news_openai,
                    # offline tools
                    self.toolkit.get_reddit_stock_info,
                ]
            ),
            "news": ToolNode(
                [
                    # online tools
                    self.toolkit.get_global_news_openai,
                    self.toolkit.get_google_news,
                    # offline tools
                    self.toolkit.get_finnhub_news,
                    self.toolkit.get_reddit_news,
                ]
            ),
            "fundamentals": ToolNode(
                [
                    # online tools
                    self.toolkit.get_fundamentals_openai,
                    # offline tools
                    self.toolkit.get_finnhub_company_insider_sentiment,
                    self.toolkit.get_finnhub_company_insider_transactions,
                    self.toolkit.get_simfin_balance_sheet,
                    self.toolkit.get_simfin_cashflow,
                    self.toolkit.get_simfin_income_stmt,
                ]
            ),
        }

    def propagate(self, company_name, trade_date):
        """Run the trading agents graph for a company on a specific date.

        Args:
            company_name: Ticker/company to analyze.
            trade_date: Date the decision is made for.

        Returns:
            Tuple of (final graph state, processed BUY/SELL/HOLD decision).
        """
        self.ticker = company_name

        # Initialize state
        init_agent_state = self.propagator.create_initial_state(
            company_name, trade_date
        )
        args = self.propagator.get_graph_args()

        if self.debug:
            # Debug mode with tracing: chunks whose message list is empty
            # are neither printed nor kept (matches original behavior).
            trace = []
            for chunk in self.graph.stream(init_agent_state, **args):
                if chunk["messages"]:
                    chunk["messages"][-1].pretty_print()
                    trace.append(chunk)
            final_state = trace[-1]
        else:
            # Standard mode without tracing
            final_state = self.graph.invoke(init_agent_state, **args)

        # Store current state for reflection
        self.curr_state = final_state

        # Log state
        self._log_state(trade_date, final_state)

        # Return decision and processed signal
        return final_state, self.process_signal(final_state["final_trade_decision"])

    def _log_state(self, trade_date, final_state):
        """Log the final state for *trade_date* to a JSON file."""
        self.log_states_dict[str(trade_date)] = {
            "company_of_interest": final_state["company_of_interest"],
            "trade_date": final_state["trade_date"],
            "market_report": final_state["market_report"],
            "sentiment_report": final_state["sentiment_report"],
            "news_report": final_state["news_report"],
            "fundamentals_report": final_state["fundamentals_report"],
            "investment_debate_state": {
                "bull_history": final_state["investment_debate_state"]["bull_history"],
                "bear_history": final_state["investment_debate_state"]["bear_history"],
                "history": final_state["investment_debate_state"]["history"],
                "current_response": final_state["investment_debate_state"][
                    "current_response"
                ],
                "judge_decision": final_state["investment_debate_state"][
                    "judge_decision"
                ],
            },
            "trader_investment_decision": final_state["trader_investment_plan"],
            "risk_debate_state": {
                "risky_history": final_state["risk_debate_state"]["risky_history"],
                "safe_history": final_state["risk_debate_state"]["safe_history"],
                "neutral_history": final_state["risk_debate_state"]["neutral_history"],
                "history": final_state["risk_debate_state"]["history"],
                "judge_decision": final_state["risk_debate_state"]["judge_decision"],
            },
            "investment_plan": final_state["investment_plan"],
            "final_trade_decision": final_state["final_trade_decision"],
        }

        # Save to file, reusing the Path object instead of re-building the
        # same path by hand with a second f-string.
        directory = Path(f"eval_results/{self.ticker}/TradingAgentsStrategy_logs/")
        directory.mkdir(parents=True, exist_ok=True)
        with open(directory / "full_states_log.json", "w") as f:
            json.dump(self.log_states_dict, f, indent=4)

    def reflect_and_remember(self, returns_losses):
        """Reflect on decisions and update each agent's memory based on returns."""
        self.reflector.reflect_bull_researcher(
            self.curr_state, returns_losses, self.bull_memory
        )
        self.reflector.reflect_bear_researcher(
            self.curr_state, returns_losses, self.bear_memory
        )
        self.reflector.reflect_trader(
            self.curr_state, returns_losses, self.trader_memory
        )
        self.reflector.reflect_invest_judge(
            self.curr_state, returns_losses, self.invest_judge_memory
        )
        self.reflector.reflect_risk_manager(
            self.curr_state, returns_losses, self.risk_manager_memory
        )

    def process_signal(self, full_signal):
        """Process a signal to extract the core decision."""
        return self.signal_processor.process_signal(full_signal)