version: '3.8' # Compose file format version

services:
  app:
    build: . # Use the Dockerfile in the current directory
    volumes:
      - .:/app # Mount the current directory to /app in the container for live code changes
      - ./.ollama:/app/.ollama # Cache Ollama models between runs
      - ./data:/app/data # Mount the data directory for data files
    env_file:
      - .env # Load environment variables from the .env file
    # Alternatively, set the variables inline instead of using env_file:
    #environment:
    #  - LLM_PROVIDER=ollama
    #  - LLM_BACKEND_URL=http://localhost:11434/v1
    #  - LLM_DEEP_THINK_MODEL=qwen3:0.6b
    #  - LLM_QUICK_THINK_MODEL=qwen3:0.6b
    #  - LLM_EMBEDDING_MODEL=nomic-embed-text
    #  - MAX_DEBATE_ROUNDS=1
    #  - ONLINE_TOOLS=False
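    # For reference, the equivalent .env file uses plain KEY=value lines
    # (same variables as above; shown only to illustrate the format):
    #   LLM_PROVIDER=ollama
    #   LLM_BACKEND_URL=http://localhost:11434/v1
    #   LLM_DEEP_THINK_MODEL=qwen3:0.6b
    #   LLM_QUICK_THINK_MODEL=qwen3:0.6b
    #   LLM_EMBEDDING_MODEL=nomic-embed-text
    #   MAX_DEBATE_ROUNDS=1
    #   ONLINE_TOOLS=False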
    # The default command in the Dockerfile is `python main.py`.
    # To run tests with Compose:
    #   docker-compose run --rm app pytest tests/test_main.py
    # Or set a default command here instead:
    #command: python test_ollama_connection.py # Uncomment to run a specific test script
    #command: python -m cli.main # Uncomment to run the CLI interface
    #command: python -m main # Uncomment to run the main application
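    # Any of these can also be supplied at runtime without editing this file, e.g.:
    #   docker-compose run --rm app python -m cli.main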
    tty: true # Keep the container running
    stdin_open: true # Keep stdin open for interactive mode
    # Uncomment the following lines to enable GPU support.
    # For more information, refer to the Docker documentation:
    # https://docs.docker.com/compose/how-tos/gpu-support/
    #deploy:
    #  resources:
    #    reservations:
    #      devices:
    #        - capabilities: ["gpu"]
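    # A fuller reservation sketch in the form shown in the Docker docs linked
    # above; the driver and count values here are illustrative assumptions:
    #deploy:
    #  resources:
    #    reservations:
    #      devices:
    #        - driver: nvidia
    #          count: 1
    #          capabilities: ["gpu"]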
    ports:
      - "11434:11434" # Expose port 11434 for Ollama
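# Optional sketch (not part of the original setup): run Ollama as a separate
# service instead of inside the app container. LLM_BACKEND_URL would then
# point at http://ollama:11434/v1, and the ports mapping would move here.
#  ollama:
#    image: ollama/ollama # Official Ollama image
#    volumes:
#      - ./.ollama:/root/.ollama # Reuse the same model cache
#    ports:
#      - "11434:11434"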