
Commit 9ecd061

feat: Add llama-agent template (#150)

Co-authored-by: Marcus Schiesser <[email protected]>

1 parent 344d832 commit 9ecd061

17 files changed: +494 −52 lines changed

.changeset/shy-horses-think.md

Lines changed: 5 additions & 0 deletions

@@ -0,0 +1,5 @@
+---
+"create-llama": patch
+---
+
+Add new template for a multi-agents app

helpers/env-variables.ts

Lines changed: 34 additions & 0 deletions

@@ -5,6 +5,7 @@ import {
   ModelConfig,
   TemplateDataSource,
   TemplateFramework,
+  TemplateType,
   TemplateVectorDB,
 } from "./types";

@@ -378,6 +379,36 @@ const getSystemPromptEnv = (tools?: Tool[]): EnvVar => {
   };
 };

+const getTemplateEnvs = (template?: TemplateType): EnvVar[] => {
+  if (template === "multiagent") {
+    return [
+      {
+        name: "MESSAGE_QUEUE_PORT",
+      },
+      {
+        name: "CONTROL_PLANE_PORT",
+      },
+      {
+        name: "HUMAN_CONSUMER_PORT",
+      },
+      {
+        name: "AGENT_QUERY_ENGINE_PORT",
+        value: "8003",
+      },
+      {
+        name: "AGENT_QUERY_ENGINE_DESCRIPTION",
+        value: "Query information from the provided data",
+      },
+      {
+        name: "AGENT_DUMMY_PORT",
+        value: "8004",
+      },
+    ];
+  } else {
+    return [];
+  }
+};
+
 export const createBackendEnvFile = async (
   root: string,
   opts: {
@@ -386,6 +417,7 @@ export const createBackendEnvFile = async (
     modelConfig: ModelConfig;
     framework: TemplateFramework;
     dataSources?: TemplateDataSource[];
+    template?: TemplateType;
     port?: number;
     tools?: Tool[];
   },
@@ -406,6 +438,8 @@ export const createBackendEnvFile = async (
     ...getVectorDBEnvs(opts.vectorDb, opts.framework),
     ...getFrameworkEnvs(opts.framework, opts.port),
     ...getToolEnvs(opts.tools),
+    // Add template environment variables
+    ...getTemplateEnvs(opts.template),
     getSystemPromptEnv(opts.tools),
   ];
   // Render and write env file
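Note: with these defaults, the .env generated for a multiagent project would contain a block along the following lines. This is a sketch for orientation, not part of the diff; exactly how variables without a value are rendered depends on the env-file renderer, so the empty assignments here are an assumption.

MESSAGE_QUEUE_PORT=
CONTROL_PLANE_PORT=
HUMAN_CONSUMER_PORT=
AGENT_QUERY_ENGINE_PORT=8003
AGENT_QUERY_ENGINE_DESCRIPTION=Query information from the provided data
AGENT_DUMMY_PORT=8004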

helpers/index.ts

Lines changed: 12 additions & 9 deletions

@@ -141,15 +141,18 @@ export const installTemplate = async (
   // This is a backend, so we need to copy the test data and create the env file.

   // Copy the environment file to the target directory.
-  await createBackendEnvFile(props.root, {
-    modelConfig: props.modelConfig,
-    llamaCloudKey: props.llamaCloudKey,
-    vectorDb: props.vectorDb,
-    framework: props.framework,
-    dataSources: props.dataSources,
-    port: props.externalPort,
-    tools: props.tools,
-  });
+  if (props.template === "streaming" || props.template === "multiagent") {
+    await createBackendEnvFile(props.root, {
+      modelConfig: props.modelConfig,
+      llamaCloudKey: props.llamaCloudKey,
+      vectorDb: props.vectorDb,
+      framework: props.framework,
+      dataSources: props.dataSources,
+      port: props.externalPort,
+      tools: props.tools,
+      template: props.template,
+    });
+  }

   if (props.dataSources.length > 0) {
     console.log("\nGenerating context data...\n");

helpers/python.ts

Lines changed: 19 additions & 12 deletions

@@ -320,20 +320,27 @@ export const installPythonTemplate = async ({
     cwd: path.join(compPath, "loaders", "python"),
   });

-  // Select and copy engine code based on data sources and tools
-  let engine;
-  tools = tools ?? [];
-  if (dataSources.length > 0 && tools.length === 0) {
-    console.log("\nNo tools selected - use optimized context chat engine\n");
-    engine = "chat";
-  } else {
-    engine = "agent";
-  }
-  await copy("**", enginePath, {
-    parents: true,
-    cwd: path.join(compPath, "engines", "python", engine),
+  // Copy settings.py to app
+  await copy("**", path.join(root, "app"), {
+    cwd: path.join(compPath, "settings", "python"),
   });

+  if (template === "streaming") {
+    // For the streaming template only:
+    // Select and copy engine code based on data sources and tools
+    let engine;
+    if (dataSources.length > 0 && (!tools || tools.length === 0)) {
+      console.log("\nNo tools selected - use optimized context chat engine\n");
+      engine = "chat";
+    } else {
+      engine = "agent";
+    }
+    await copy("**", enginePath, {
+      parents: true,
+      cwd: path.join(compPath, "engines", "python", engine),
+    });
+  }
+
   console.log("Adding additional dependencies");

   const addOnDependencies = getAdditionalDependencies(
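Note: after the settings-copy step, a generated FastAPI project would be expected to contain roughly the following files (a sketch, assuming the default app layout and the files shown in this commit):

my-app/app/settings.py   # copied from templates/components/settings/python
my-app/app/llmhub.py     # copied alongside; only imported for the t-systems provider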

helpers/types.ts

Lines changed: 5 additions & 1 deletion

@@ -16,7 +16,11 @@ export type ModelConfig = {
   dimensions: number;
   isConfigured(): boolean;
 };
-export type TemplateType = "streaming" | "community" | "llamapack";
+export type TemplateType =
+  | "streaming"
+  | "community"
+  | "llamapack"
+  | "multiagent";
 export type TemplateFramework = "nextjs" | "express" | "fastapi";
 export type TemplateUI = "html" | "shadcn";
 export type TemplateVectorDB =
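Note: callers that branch on TemplateType can get a compile-time reminder whenever a variant like "multiagent" is added. The helper below is hypothetical (it is not part of this commit) and only illustrates the exhaustiveness pattern:

function describeTemplate(template: TemplateType): string {
  switch (template) {
    case "streaming":
      return "Agentic RAG (single agent)";
    case "multiagent":
      return "Multi-agent app (using llama-agents)";
    case "community":
      return "Community template";
    case "llamapack":
      return "LlamaPack example";
    default: {
      // If a new variant is added and not handled above, this assignment
      // fails to type-check, pointing at every switch that needs updating.
      const exhausted: never = template;
      throw new Error(`Unhandled template: ${exhausted}`);
    }
  }
}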

questions.ts

Lines changed: 46 additions & 27 deletions

@@ -9,6 +9,7 @@ import {
   TemplateDataSource,
   TemplateDataSourceType,
   TemplateFramework,
+  TemplateType,
 } from "./helpers";
 import { COMMUNITY_OWNER, COMMUNITY_REPO } from "./helpers/constant";
 import { EXAMPLE_FILE } from "./helpers/datasources";
@@ -122,6 +123,7 @@ const getVectorDbChoices = (framework: TemplateFramework) => {
 export const getDataSourceChoices = (
   framework: TemplateFramework,
   selectedDataSource: TemplateDataSource[],
+  template?: TemplateType,
 ) => {
   // If LlamaCloud is already selected, don't show any other options
   if (selectedDataSource.find((s) => s.type === "llamacloud")) {
@@ -137,10 +139,12 @@ export const getDataSourceChoices = (
     });
   }
   if (selectedDataSource === undefined || selectedDataSource.length === 0) {
-    choices.push({
-      title: "No data, just a simple chat or agent",
-      value: "none",
-    });
+    if (template !== "multiagent") {
+      choices.push({
+        title: "No data, just a simple chat or agent",
+        value: "none",
+      });
+    }
     choices.push({
       title:
         process.platform !== "linux"
@@ -281,25 +285,27 @@ export const askQuestions = async (
       },
     ];

-    const modelConfigured =
-      !program.llamapack && program.modelConfig.isConfigured();
-    // If using LlamaParse, require LlamaCloud API key
-    const llamaCloudKeyConfigured = program.useLlamaParse
-      ? program.llamaCloudKey || process.env["LLAMA_CLOUD_API_KEY"]
-      : true;
-    const hasVectorDb = program.vectorDb && program.vectorDb !== "none";
-    // Can run the app if all tools do not require configuration
-    if (
-      !hasVectorDb &&
-      modelConfigured &&
-      llamaCloudKeyConfigured &&
-      !toolsRequireConfig(program.tools)
-    ) {
-      actionChoices.push({
-        title:
-          "Generate code, install dependencies, and run the app (~2 min)",
-        value: "runApp",
-      });
+    if (program.template !== "multiagent") {
+      const modelConfigured =
+        !program.llamapack && program.modelConfig.isConfigured();
+      // If using LlamaParse, require LlamaCloud API key
+      const llamaCloudKeyConfigured = program.useLlamaParse
+        ? program.llamaCloudKey || process.env["LLAMA_CLOUD_API_KEY"]
+        : true;
+      const hasVectorDb = program.vectorDb && program.vectorDb !== "none";
+      // Can run the app if all tools do not require configuration
+      if (
+        !hasVectorDb &&
+        modelConfigured &&
+        llamaCloudKeyConfigured &&
+        !toolsRequireConfig(program.tools)
+      ) {
+        actionChoices.push({
+          title:
+            "Generate code, install dependencies, and run the app (~2 min)",
+          value: "runApp",
+        });
+      }
     }

     const { action } = await prompts(
@@ -331,7 +337,11 @@ export const askQuestions = async (
         name: "template",
         message: "Which template would you like to use?",
         choices: [
-          { title: "Chat", value: "streaming" },
+          { title: "Agentic RAG (single agent)", value: "streaming" },
+          {
+            title: "Multi-agent app (using llama-agents)",
+            value: "multiagent",
+          },
           {
             title: `Community template from ${styledRepo}`,
             value: "community",
@@ -395,6 +405,10 @@ export const askQuestions = async (
     return; // early return - no further questions needed for llamapack projects
   }

+  if (program.template === "multiagent") {
+    // TODO: multi-agents currently only supports FastAPI
+    program.framework = preferences.framework = "fastapi";
+  }
   if (!program.framework) {
     if (ciInfo.isCI) {
       program.framework = getPrefOrDefault("framework");
@@ -420,7 +434,10 @@ export const askQuestions = async (
     }
   }

-  if (program.framework === "express" || program.framework === "fastapi") {
+  if (
+    (program.framework === "express" || program.framework === "fastapi") &&
+    program.template === "streaming"
+  ) {
     // if a backend-only framework is selected, ask whether we should create a frontend
     if (program.frontend === undefined) {
       if (ciInfo.isCI) {
@@ -457,7 +474,7 @@ export const askQuestions = async (
     }
   }

-  if (!program.observability) {
+  if (!program.observability && program.template === "streaming") {
     if (ciInfo.isCI) {
       program.observability = getPrefOrDefault("observability");
     } else {
@@ -501,6 +518,7 @@ export const askQuestions = async (
     const choices = getDataSourceChoices(
       program.framework,
       program.dataSources,
+      program.template,
     );
     if (choices.length === 0) break;
     const { selectedSource } = await prompts(
@@ -695,7 +713,8 @@ export const askQuestions = async (
     }
   }

-  if (!program.tools) {
+  if (!program.tools && program.template === "streaming") {
+    // TODO: allow to select tools also for multi-agent framework
     if (ciInfo.isCI) {
       program.tools = getPrefOrDefault("tools");
     } else {
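Note: the practical effect of threading template through getDataSourceChoices is that the "no data" option disappears for multi-agent projects, which always need a data source. A hypothetical call to illustrate:

const choices = getDataSourceChoices("fastapi", [], "multiagent");
// choices no longer includes { title: "No data, just a simple chat or agent", value: "none" },
// so the user must pick at least one data source for the multi-agent template.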
templates/components/settings/python/llmhub.py

Lines changed: 61 additions & 0 deletions

@@ -0,0 +1,61 @@
+from llama_index.embeddings.openai import OpenAIEmbedding
+from llama_index.core.settings import Settings
+from typing import Dict
+import os
+
+DEFAULT_MODEL = "gpt-3.5-turbo"
+DEFAULT_EMBEDDING_MODEL = "text-embedding-3-large"
+
+class TSIEmbedding(OpenAIEmbedding):
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
+        self._query_engine = self._text_engine = self.model_name
+
+def llm_config_from_env() -> Dict:
+    from llama_index.core.constants import DEFAULT_TEMPERATURE
+
+    model = os.getenv("MODEL", DEFAULT_MODEL)
+    temperature = os.getenv("LLM_TEMPERATURE", DEFAULT_TEMPERATURE)
+    max_tokens = os.getenv("LLM_MAX_TOKENS")
+    api_key = os.getenv("T_SYSTEMS_LLMHUB_API_KEY")
+    api_base = os.getenv("T_SYSTEMS_LLMHUB_BASE_URL")
+
+    config = {
+        "model": model,
+        "api_key": api_key,
+        "api_base": api_base,
+        "temperature": float(temperature),
+        "max_tokens": int(max_tokens) if max_tokens is not None else None,
+    }
+    return config
+
+
+def embedding_config_from_env() -> Dict:
+    from llama_index.core.constants import DEFAULT_EMBEDDING_DIM
+
+    model = os.getenv("EMBEDDING_MODEL", DEFAULT_EMBEDDING_MODEL)
+    dimension = os.getenv("EMBEDDING_DIM", DEFAULT_EMBEDDING_DIM)
+    api_key = os.getenv("T_SYSTEMS_LLMHUB_API_KEY")
+    api_base = os.getenv("T_SYSTEMS_LLMHUB_BASE_URL")
+
+    config = {
+        "model_name": model,
+        "dimension": int(dimension) if dimension is not None else None,
+        "api_key": api_key,
+        "api_base": api_base,
+    }
+    return config
+
+def init_llmhub():
+    from llama_index.llms.openai_like import OpenAILike
+
+    llm_configs = llm_config_from_env()
+    embedding_configs = embedding_config_from_env()
+
+    Settings.embed_model = TSIEmbedding(**embedding_configs)
+    Settings.llm = OpenAILike(
+        **llm_configs,
+        is_chat_model=True,
+        is_function_calling_model=False,
+        context_window=4096,
+    )

templates/types/streaming/fastapi/app/settings.py renamed to templates/components/settings/python/settings.py

Lines changed: 3 additions & 3 deletions

@@ -2,7 +2,7 @@
 from typing import Dict

 from llama_index.core.settings import Settings
-from .llmhub import init_llmhub
+

 def init_settings():
     model_provider = os.getenv("MODEL_PROVIDER")
@@ -20,6 +20,8 @@ def init_settings():
         case "azure-openai":
             init_azure_openai()
         case "t-systems":
+            from .llmhub import init_llmhub
+
             init_llmhub()
         case _:
             raise ValueError(f"Invalid model provider: {model_provider}")
@@ -147,5 +149,3 @@ def init_gemini():

     Settings.llm = Gemini(model=model_name)
     Settings.embed_model = GeminiEmbedding(model_name=embed_model_name)
-
-