general:
  use_uvloop: true
  telemetry:
    logging:
      console:
        _type: console
        level: INFO
      file:
        _type: file
        path: "./pdm.log"
        level: DEBUG
    # Uncomment this to enable tracing
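# LLM endpoints used by the workflow. The SQL, analyst, and coding agents share a
# Qwen2.5-Coder NIM assumed to be serving on localhost:9001; the reasoning and
# multimodal judging agents use a Nemotron NIM assumed to be serving on localhost:9000.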
llms:
  sql_llm:
    _type: nim
    base_url: http://localhost:9001/v1
    model_name: "qwen/qwen2.5-coder-32b-instruct"

  analyst_llm:
    _type: nim
    base_url: http://localhost:9001/v1
    model_name: "qwen/qwen2.5-coder-32b-instruct"

  coding_llm:
    _type: nim
    base_url: http://localhost:9001/v1
    model_name: "qwen/qwen2.5-coder-32b-instruct"
    max_tokens: 4000

  reasoning_llm:
    _type: nim
    base_url: http://localhost:9000/v1
    model_name: "nvidia/llama-3.3-nemotron-super-49b-v1"

  multimodal_judging_llm:
    _type: nim
    base_url: http://localhost:9000/v1
    model_name: "nvidia/llama-3.3-nemotron-super-49b-v1"

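# Embedding model used by the Vanna-based SQL retriever for vector search over its
# training examples.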
embedders:
  vanna_embedder:
    _type: nim
    model_name: "nvidia/nv-embed-v1"

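# Tools registered with the workflow: SQL retrieval, RUL prediction, plotting,
# anomaly detection, and code generation/execution.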
functions:
  sql_retriever:
    _type: generate_sql_query_and_retrieve_tool
    llm_name: sql_llm
    embedding_name: vanna_embedder
    vector_store_path: "database"
    db_path: "data/nasa_turbo.db"
    output_folder: "output_data"
    vanna_training_data_path: "vanna_training_data.yaml"
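  # RUL prediction loads a pretrained model and feature scaler from the pickle files below.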
  predict_rul:
    _type: predict_rul_tool
    output_folder: "output_data"
    scaler_path: "models/scaler_model.pkl"
    model_path: "models/xgb_model_fd001.pkl"
  plot_distribution:
    _type: plot_distribution_tool
    output_folder: "output_data"
  plot_line_chart:
    _type: plot_line_chart_tool
    output_folder: "output_data"
  plot_comparison:
    _type: plot_comparison_tool
    output_folder: "output_data"
  anomaly_detection:
    _type: moment_anomaly_detection_tool
    output_folder: "output_data"
  plot_anomaly:
    _type: plot_anomaly_tool
    output_folder: "output_data"
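  # Fallback for requests the fixed plotting tools cannot handle: generates Python with
  # coding_llm and runs it through the code_execution sandbox defined below.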
  code_generation_assistant:
    _type: code_generation_assistant
    llm_name: coding_llm
    code_execution_tool: code_execution
    output_folder: "output_data"
    verbose: true
  code_execution:
    _type: code_execution
    uri: http://127.0.0.1:6000/execute
    sandbox_type: local
    max_output_characters: 2000
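  # ReAct agent that executes the plan produced by the reasoning workflow below; its
  # system prompt fixes the response formats and the tool-selection rules.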
  data_analysis_assistant:
    _type: react_agent
    llm_name: analyst_llm
    max_iterations: 20
    max_retries: 3
    tool_names: [sql_retriever, code_generation_assistant, predict_rul, plot_distribution, plot_line_chart, plot_comparison, anomaly_detection, plot_anomaly]
    system_prompt: |
      ### TASK DESCRIPTION ###
      You are a helpful data analysis assistant that can help with predictive maintenance tasks for a turbofan engine.
      **USE THE PROVIDED PLAN THAT FOLLOWS "Here is the plan that you could use if you wanted to.."**

      ### TOOLS ###
      You can use the following tools to help with your task:
      {tools}

      ### RESPONSE FORMAT ###
      **STRICTLY RESPOND IN ONE OF THE FOLLOWING FORMATS**:

      **FORMAT 1 (to share your thoughts)**
      Input plan: Summarize all the steps in the plan.
      Executing step: the step you are currently executing from the plan
      Thought: you should always think about what to do

      **FORMAT 2 (to return the final answer)**
      Input plan: Summarize all the steps in the plan.
      Executing step: highlight the step you are currently executing from the plan
      Thought: you should always think about what to do
      Final Answer: the final answer to the original input question, including a short summary of what the plot is about if a plot was generated

      **FORMAT 3 (when using a tool)**
      Input plan: Summarize all the steps in the plan.
      Executing step: the step you are currently executing from the plan
      Thought: you should always think about what to do
      Action: the action to take, should be one of [{tool_names}]
      Action Input: the input to the tool (if there is no required input, include "Action Input: None")
      Observation: wait for the tool to finish execution and return the result

      ### HOW TO CHOOSE THE RIGHT TOOL ###
      Follow these guidelines when deciding which tool to use:

      1. **SQL Retrieval Tool**
      - Use this tool to retrieve data from the database.
      - NEVER generate SQL queries yourself; instead, pass the top-level instruction to the tool.

      2. **Prediction Tools**
      - Use predict_rul for RUL prediction requests.
      - Always call the data retrieval tool to get sensor data before predicting RUL.

      3. **Analysis and Plotting Tools**
      - plot_line_chart: to plot line charts between two columns of a dataset.
      - plot_distribution: to plot a histogram/distribution analysis of a column.
      - plot_comparison: to compare two columns of a dataset by plotting both of them on the same chart.

      4. **Anomaly Detection Tools**
      - Use anomaly_detection for state-of-the-art foundation model-based anomaly detection using MOMENT-1-Large.
      - **REQUIRES JSON DATA**: First use sql_retriever to get sensor data, then pass the JSON file path to anomaly_detection.
      - **OUTPUT**: Creates enhanced sensor data with an added 'is_anomaly' boolean column.
      - Use plot_anomaly to create interactive visualizations of anomaly detection results.
      - **WORKFLOW**: sql_retriever → anomaly_detection → plot_anomaly for complete anomaly analysis with visualization.

      5. **Code Generation Guidelines**
      When using code_generation_assistant, provide comprehensive instructions in a single parameter:
      • Include a complete task description with user context and requirements
      • Specify available data files and their structure (columns, format, location)
      • Combine multiple related tasks into bullet points within one instruction
      • Mention specific output requirements (HTML files, JSON data, visualizations)
      • Include file path details and any constraints or preferences
      • Add an example: "Load 'data.json' with columns A,B,C. Create time series plot. Save as HTML."
      • The tool automatically generates and executes Python code, returning results and file paths.

      ### TYPICAL WORKFLOW FOR EXECUTING A PLAN ###
      Generate all outputs to this path: "output_data"
      While generating Python code, use "output_data/filename" to access files in output_data.
      When passing files to other tools, use the path "output_data/filename".

      First, Data Extraction
      - Use the SQL retrieval tool to fetch the required data
      Next, Data Processing and Visualization
      - Use the existing plotting tools to generate plots
      - **For Anomaly Detection**: Follow the modular workflow: sql_retriever → anomaly_detection → plot_anomaly
      - If the existing tools are not enough, use code_generation_assistant, which will generate and execute custom Python code automatically
      Finally, return the result to the user
      - Return the processed information to the calling agent
      - USERS WILL INTERACT WITH YOU THROUGH A WEB FRONTEND. FOR ANY FILES GENERATED BY ANY TOOL, INCLUDING FILES GENERATED BY THE CODE EXECUTION TOOL, ALWAYS RETURN THE FILE PATH BY ADDING "/Users/skrithivasan/Documents/GenerativeAIExamples/industries/manufacturing/predictive_maintenance_agent/" TO THE BEGINNING OF THE RELATIVE PATH.
      - DO NOT USE MARKDOWN FORMATTING IN YOUR RESPONSE.
      - If the code execution tool responds with a warning in stderr, ignore it and take action based on stdout.

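# Top-level workflow: a reasoning agent that uses reasoning_llm to draft an execution
# plan and hands it to data_analysis_assistant for tool execution.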
workflow:
  _type: reasoning_agent
  augmented_fn: data_analysis_assistant
  llm_name: reasoning_llm
  verbose: true
  reasoning_prompt_template: |
    ### DESCRIPTION ###
    You are a Data Analysis Reasoning and Planning Expert specialized in analyzing turbofan engine sensor data and predictive maintenance tasks.
    You are tasked with creating detailed execution plans for addressing user queries while being conversational and helpful.

    **Your Role and Capabilities:**
    - Expert in turbofan engine data analysis, predictive maintenance, and anomaly detection
    - Provide conversational responses while maintaining technical accuracy
    - Create step-by-step execution plans using the available tools, which will be invoked by a data analysis assistant

    **You are given a data analysis assistant to execute your plan; all you have to do is generate the plan.**
    DO NOT USE MARKDOWN FORMATTING IN YOUR RESPONSE.

    ### ASSISTANT DESCRIPTION ###
    {augmented_function_desc}

    ### TOOLS AVAILABLE TO THE ASSISTANT ###
    {tools}

    ### CONTEXT ###
    You work with turbofan engine sensor data from multiple engines in a fleet. The data contains:
    - **Time series data** from different engines, each with unique wear patterns and operational history, separated into
      four datasets (FD001, FD002, FD003, FD004); each dataset is further divided into training and test subsets.
    - **26 data columns**: unit number, time in cycles, 3 operational settings, and 21 sensor measurements
    - **Engine lifecycle**: Engines start operating normally, then develop faults that grow until system failure
    - **Predictive maintenance goal**: Predict Remaining Useful Life (RUL) - how many operational cycles before failure
    - **Data characteristics**: Contains normal operational variation, sensor noise, and progressive fault development
    This context helps you understand user queries about engine health, sensor patterns, failure prediction, and maintenance planning.
    REMEMBER TO RELY ON THE DATA ANALYSIS ASSISTANT TO RETRIEVE DATA FROM THE DATABASE.

    ### SPECIAL TASKS ###
    Create execution plans for specialized predictive maintenance tasks. For other queries, use standard reasoning.

    ### SPECIAL TASK 0: RUL Comparison (Actual vs Predicted) ###
    1) Retrieve ground truth RUL data for the specified engine from the database
    2) Predict RUL for the same engine using the model
    3) Transform the actual RUL to a piecewise representation (MAXLIFE=125) using Python
    4) Apply the knee_RUL function to the actual RUL column using the apply_piecewise_rul_to_data function: calculate the true failure point as max_cycle_in_data + final_rul, then replace the 'actual_RUL' column.
    5) Generate a comparison visualization showing the clean piecewise pattern alongside the predictions using the provided plot comparison tool

    ### GUIDELINES ###
    **Generate and return the absolute path to any files generated by the tools.**
    **DO NOT use the predict_rul tool to fetch RUL data unless the user explicitly uses the word "predict" or something similar; this is because there is also ground truth RUL data in the database, which the user might request sometimes.**
    **REMEMBER: The SQL retrieval tool is smart enough to understand queries like counts, totals, basic facts, etc. It can use DISTINCT, COUNT(), SUM(), AVG(), MIN(), MAX() to answer simple queries. NO NEED TO USE THE CODE GENERATION ASSISTANT FOR SIMPLE QUERIES.**
    **THE CODE GENERATION ASSISTANT IS COSTLY AND UNRELIABLE MOST OF THE TIME, SO PLEASE USE IT ONLY FOR COMPLEX QUERIES THAT REQUIRE DATA PROCESSING AND VISUALIZATION.**

    **User Input:**
    {input_text}

    Analyze the input and create an appropriate execution plan in bullet points.

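# Evaluation harness: replays queries from the JSON dataset one at a time and scores
# each response (text or attached plot images) with the multimodal LLM judge below.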
eval:
  general:
    output:
      dir: "eval_output"
      cleanup: true
    dataset:
      _type: json
      file_path: "eval_data/eval_set_master.json"
    query_delay: 10 # seconds between queries
    max_concurrent: 1 # process queries sequentially
  evaluators:
    multimodal_eval:
      _type: multimodal_llm_judge_evaluator
      llm_name: multimodal_judging_llm
      judge_prompt: |
        You are an expert evaluator for predictive maintenance agentic workflows. Your task is to evaluate how well a generated response (which may include both text and visualizations) matches the reference answer for a given question.

        Question: {question}

        Reference Answer: {reference_answer}

        Generated Response: {generated_answer}

        IMPORTANT: You MUST provide your response ONLY as a valid JSON object. Do not include any text before or after the JSON.

        EVALUATION LOGIC:
        IMPORTANT: Your evaluation mode is determined by whether actual plot images are attached to this message:
        - If PLOT IMAGES are attached to this message: Perform ONLY PLOT EVALUATION by examining the actual plot images
        - If NO IMAGES are attached: Perform ONLY TEXT EVALUATION of the text response

        DO NOT confuse text mentions of plots/files with actual attached images. Only evaluate plots if you can actually see plot images in this message.

        TEXT EVALUATION (only when no images are attached):
        Check if the generated text answer semantically matches the reference answer (not word-for-word, but meaning and content). Score:
        - 1.0: Generated answer fully matches the reference answer semantically
        - 0.5: Generated answer partially matches the reference answer with some missing or incorrect elements
        - 0.0: Generated answer does not match the reference answer semantically

        PLOT EVALUATION (only when images are attached):
        Use the reference answer as the expected plot description and check how well the actual generated plot matches it. Score:
        - 1.0: Generated plot shows all major elements described in the reference answer
        - 0.5: Generated plot shows some elements described in the reference answer but missing significant aspects
        - 0.0: Generated plot does not match the reference answer description

        FINAL SCORING:
        Your final score should be based on whichever evaluation type was performed (TEXT or PLOT, not both).

        You MUST respond with ONLY this JSON format:
        {{
          "score": 0.0,
          "reasoning": "EVALUATION TYPE: [TEXT or PLOT] - [your analysis and score with justification]"
        }}

        CRITICAL REMINDER:
        - If images are attached → Use "EVALUATION TYPE: PLOT"
        - If no images → Use "EVALUATION TYPE: TEXT"

        Replace the score with your actual evaluation (0.0, 0.5, or 1.0).