# =============================================================================
# .env.example — template environment configuration.
# Copy to .env and adjust values before running.
# =============================================================================
# =============================================================================
# Local LLM Configuration (for agent_local.py)
# =============================================================================
LOCAL_MODEL_NAME=Qwen/Qwen2.5-7B-Instruct
# Device to run the model on
LOCAL_MODEL_DEVICE=auto
# Maximum number of new tokens to generate per response
LOCAL_MODEL_MAX_NEW_TOKENS=256
# Temperature for generation (0.0 = deterministic, 1.0 = creative)
LOCAL_MODEL_TEMPERATURE=0.2
# Data type for model weights
LOCAL_MODEL_DTYPE=float16
# =============================================================================
# Agent Configuration
# =============================================================================
# Whether to use RAG (Retrieval-Augmented Generation) for episodic memory
USE_RAG=false
# Whether to render the game frames in a matplotlib window
# Set to false for headless/server environments
RENDER=false
# Number of test runs to execute
TEST_RUNS=5
# Maximum frames per run (episode will stop after this many frames)
MAX_FRAMES=500
# Whether to enforce the max frames limit
USE_MAX_FRAMES=true
# Embedding method identifier (used for logging results)
EMBEDDING_METHOD=local_llm
# =============================================================================
# MineRL Server Configuration
# =============================================================================
# Whether to use a remote MineRL server instead of local Docker
USE_REMOTE_SERVER=false
# URL of the MineRL server (local or remote)
MINERL_SERVER_URL=http://127.0.0.1:5001
# =============================================================================
# OpenAI Configuration (for original agent.py, not needed for agent_local.py)
# =============================================================================
# OPENAI_API_KEY=sk-your-key-here
USE_OPENAI_LLM=true
OPENAI_MODEL_NAME=gpt-5-nano-2025-08-07
OPENAI_TEMPERATURE=0.2
# =============================================================================
# Vector Database Configuration
# =============================================================================
CHROMA_HOST=http://127.0.0.1:8000
CHROMA_PORT=8000