Skip to content

Commit 08b3e07

Browse files
authored
chore: simplify local index code (#537)
1 parent 1876950 commit 08b3e07

File tree

4 files changed

+17
-21
lines changed

4 files changed

+17
-21
lines changed

.changeset/grumpy-tigers-heal.md

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,5 @@
1+
---
2+
"create-llama": patch
3+
---
4+
5+
Simplify the local index code.

templates/components/workflows/python/agentic_rag/workflow.py

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -9,11 +9,12 @@
99

1010

1111
def create_workflow(chat_request: Optional[ChatRequest] = None) -> AgentWorkflow:
12-
query_tool = get_query_engine_tool(index=get_index(chat_request=chat_request))
13-
if query_tool is None:
12+
index = get_index(chat_request=chat_request)
13+
if index is None:
1414
raise RuntimeError(
1515
"Index not found! Please run `poetry run generate` to index the data first."
1616
)
17+
query_tool = get_query_engine_tool(index=index)
1718
return AgentWorkflow.from_tools_or_functions(
1819
tools_or_functions=[query_tool],
1920
llm=Settings.llm or OpenAI(model="gpt-4o-mini"),

templates/types/llamaindexserver/fastapi/app/index.py

Lines changed: 5 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -5,28 +5,19 @@
55
from llama_index.core.indices import load_index_from_storage
66
from llama_index.server.api.models import ChatRequest
77
from llama_index.server.tools.index.utils import get_storage_context
8-
from pydantic import BaseModel
98

109
logger = logging.getLogger("uvicorn")
1110

12-
13-
class IndexConfig(BaseModel):
14-
storage_dir: str = "storage"
15-
16-
@classmethod
17-
def from_default(cls, chat_request: Optional[ChatRequest] = None) -> "IndexConfig":
18-
return cls()
11+
STORAGE_DIR = "storage"
1912

2013

2114
def get_index(chat_request: Optional[ChatRequest] = None):
22-
config = IndexConfig.from_default(chat_request)
23-
storage_dir = config.storage_dir
2415
# check if storage already exists
25-
if not os.path.exists(storage_dir):
16+
if not os.path.exists(STORAGE_DIR):
2617
return None
2718
# load the existing index
28-
logger.info(f"Loading index from {storage_dir}...")
29-
storage_context = get_storage_context(storage_dir)
19+
logger.info(f"Loading index from {STORAGE_DIR}...")
20+
storage_context = get_storage_context(STORAGE_DIR)
3021
index = load_index_from_storage(storage_context)
31-
logger.info(f"Finished loading index from {storage_dir}")
22+
logger.info(f"Finished loading index from {STORAGE_DIR}")
3223
return index

templates/types/llamaindexserver/fastapi/generate.py

Lines changed: 4 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -1,9 +1,9 @@
11
import logging
22
import os
33

4-
from dotenv import load_dotenv
5-
4+
from app.index import STORAGE_DIR
65
from app.settings import init_settings
6+
from dotenv import load_dotenv
77
from llama_index.core.indices import (
88
VectorStoreIndex,
99
)
@@ -18,7 +18,6 @@ def generate_datasource():
1818
init_settings()
1919

2020
logger.info("Creating new index")
21-
storage_dir = os.environ.get("STORAGE_DIR", "storage")
2221
# load the documents and create the index
2322
reader = SimpleDirectoryReader(
2423
os.environ.get("DATA_DIR", "data"),
@@ -30,5 +29,5 @@ def generate_datasource():
3029
show_progress=True,
3130
)
3231
# store it for later
33-
index.storage_context.persist(storage_dir)
34-
logger.info(f"Finished creating new index. Stored in {storage_dir}")
32+
index.storage_context.persist(STORAGE_DIR)
33+
logger.info(f"Finished creating new index. Stored in {STORAGE_DIR}")

0 commit comments

Comments (0)