File tree Expand file tree Collapse file tree 4 files changed +17
-21
lines changed 
components/workflows/python/agentic_rag 
types/llamaindexserver/fastapi Expand file tree Collapse file tree 4 files changed +17
-21
lines changed Original file line number Diff line number Diff line change 1+ --- 
2+ " create-llama " patch 
3+ --- 
4+ 
5+ Simplify the local index code.
Original file line number Diff line number Diff line change 99
1010
1111def  create_workflow (chat_request : Optional [ChatRequest ] =  None ) ->  AgentWorkflow :
12-     query_tool  =  get_query_engine_tool ( index = get_index (chat_request = chat_request ) )
13-     if  query_tool  is  None :
12+     index  =  get_index (chat_request = chat_request )
13+     if  index  is  None :
1414        raise  RuntimeError (
1515            "Index not found! Please run `poetry run generate` to index the data first." 
1616        )
17+     query_tool  =  get_query_engine_tool (index = index )
1718    return  AgentWorkflow .from_tools_or_functions (
1819        tools_or_functions = [query_tool ],
1920        llm = Settings .llm  or  OpenAI (model = "gpt-4o-mini" ),
Original file line number Diff line number Diff line change 55from  llama_index .core .indices  import  load_index_from_storage 
66from  llama_index .server .api .models  import  ChatRequest 
77from  llama_index .server .tools .index .utils  import  get_storage_context 
8- from  pydantic  import  BaseModel 
98
109logger  =  logging .getLogger ("uvicorn" )
1110
12- 
13- class  IndexConfig (BaseModel ):
14-     storage_dir : str  =  "storage" 
15- 
16-     @classmethod  
17-     def  from_default (cls , chat_request : Optional [ChatRequest ] =  None ) ->  "IndexConfig" :
18-         return  cls ()
11+ STORAGE_DIR  =  "storage" 
1912
2013
2114def  get_index (chat_request : Optional [ChatRequest ] =  None ):
22-     config  =  IndexConfig .from_default (chat_request )
23-     storage_dir  =  config .storage_dir 
2415    # check if storage already exists 
25-     if  not  os .path .exists (storage_dir ):
16+     if  not  os .path .exists (STORAGE_DIR ):
2617        return  None 
2718    # load the existing index 
28-     logger .info (f"Loading index from { storage_dir } " )
29-     storage_context  =  get_storage_context (storage_dir )
19+     logger .info (f"Loading index from { STORAGE_DIR } " )
20+     storage_context  =  get_storage_context (STORAGE_DIR )
3021    index  =  load_index_from_storage (storage_context )
31-     logger .info (f"Finished loading index from { storage_dir } " )
22+     logger .info (f"Finished loading index from { STORAGE_DIR } " )
3223    return  index 
Original file line number Diff line number Diff line change 11import  logging 
22import  os 
33
4- from  dotenv  import  load_dotenv 
5- 
4+ from  app .index  import  STORAGE_DIR 
65from  app .settings  import  init_settings 
6+ from  dotenv  import  load_dotenv 
77from  llama_index .core .indices  import  (
88    VectorStoreIndex ,
99)
@@ -18,7 +18,6 @@ def generate_datasource():
1818    init_settings ()
1919
2020    logger .info ("Creating new index" )
21-     storage_dir  =  os .environ .get ("STORAGE_DIR" , "storage" )
2221    # load the documents and create the index 
2322    reader  =  SimpleDirectoryReader (
2423        os .environ .get ("DATA_DIR" , "data" ),
@@ -30,5 +29,5 @@ def generate_datasource():
3029        show_progress = True ,
3130    )
3231    # store it for later 
33-     index .storage_context .persist (storage_dir )
34-     logger .info (f"Finished creating new index. Stored in { storage_dir } " )
32+     index .storage_context .persist (STORAGE_DIR )
33+     logger .info (f"Finished creating new index. Stored in { STORAGE_DIR } " )
 
 
   
 
     
   
   
          
    
    
     
    
      
     
     
    You can’t perform that action at this time.
  
 
    
  
    
      
        
     
       
      
     
   
 
    
    
  
 
  
 
     
    
0 commit comments